python_code: stringlengths, 0 to 1.8M
repo_name: stringclasses, 7 values
file_path: stringlengths, 5 to 99
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/types.h> #include "qed.h" #include "qed_dev_api.h" #include "qed_hw.h" #include "qed_l2.h" #include "qed_mcp.h" #include "qed_ptp.h" #include "qed_reg_addr.h" /* 16 nano second time quantas to wait before making a Drift adjustment */ #define QED_DRIFT_CNTR_TIME_QUANTA_SHIFT 0 /* Nano seconds to add/subtract when making a Drift adjustment */ #define QED_DRIFT_CNTR_ADJUSTMENT_SHIFT 28 /* Add/subtract the Adjustment_Value when making a Drift adjustment */ #define QED_DRIFT_CNTR_DIRECTION_SHIFT 31 #define QED_TIMESTAMP_MASK BIT(16) /* Param mask for Hardware to detect/timestamp the L2/L4 unicast PTP packets */ #define QED_PTP_UCAST_PARAM_MASK 0x70F static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn) { switch (MFW_PORT(p_hwfn)) { case 0: return QED_RESC_LOCK_PTP_PORT0; case 1: return QED_RESC_LOCK_PTP_PORT1; case 2: return QED_RESC_LOCK_PTP_PORT2; case 3: return QED_RESC_LOCK_PTP_PORT3; default: return QED_RESC_LOCK_RESC_INVALID; } } static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_resc_lock_params params; enum qed_resc_lock resource; int rc; resource = qed_ptcdev_to_resc(p_hwfn); if (resource == QED_RESC_LOCK_RESC_INVALID) return -EINVAL; qed_mcp_resc_lock_default_init(&params, NULL, resource, true); rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &params); if (rc && rc != -EINVAL) { return rc; } else if (rc == -EINVAL) { /* MFW doesn't support resource locking, first PF on the port * has lock ownership. */ if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine) return 0; DP_INFO(p_hwfn, "PF doesn't have lock ownership\n"); return -EBUSY; } else if (!params.b_granted) { DP_INFO(p_hwfn, "Failed to acquire ptp resource lock\n"); return -EBUSY; } return 0; } static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_resc_unlock_params params; enum qed_resc_lock resource; int rc; resource = qed_ptcdev_to_resc(p_hwfn); if (resource == QED_RESC_LOCK_RESC_INVALID) return -EINVAL; qed_mcp_resc_lock_default_init(NULL, &params, resource, true); rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &params); if (rc == -EINVAL) { /* MFW doesn't support locking, first PF has lock ownership */ if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine) { rc = 0; } else { DP_INFO(p_hwfn, "PF doesn't have lock ownership\n"); return -EINVAL; } } else if (rc) { DP_INFO(p_hwfn, "Failed to release the ptp resource lock\n"); } return rc; } /* Read Rx timestamp */ static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; u32 val; *timestamp = 0; val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID); if (!(val & QED_TIMESTAMP_MASK)) { DP_INFO(p_hwfn, "Invalid Rx timestamp, buf_seqid = %d\n", val); return -EINVAL; } val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_LSB); *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_MSB); *timestamp <<= 32; *timestamp |= val; /* Reset timestamp register to allow new timestamp */ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID, QED_TIMESTAMP_MASK); return 0; } /* Read Tx timestamp */ static int qed_ptp_hw_read_tx_ts(struct qed_dev *cdev, u64 *timestamp) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; u32 val; *timestamp = 0; val = 
qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID); if (!(val & QED_TIMESTAMP_MASK)) { DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Invalid Tx timestamp, buf_seqid = %08x\n", val); return -EINVAL; } val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_LSB); *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_MSB); *timestamp <<= 32; *timestamp |= val; /* Reset timestamp register to allow new timestamp */ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK); return 0; } /* Read Phy Hardware Clock */ static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; u32 temp = 0; temp = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_LSB); *phc_cycles = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_MSB); *phc_cycles <<= 32; *phc_cycles |= temp; return 0; } /* Filter PTP protocol packets that need to be timestamped */ static int qed_ptp_hw_cfg_filters(struct qed_dev *cdev, enum qed_ptp_filter_type rx_type, enum qed_ptp_hwtstamp_tx_type tx_type) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; u32 rule_mask, enable_cfg = 0x0; switch (rx_type) { case QED_PTP_FILTER_NONE: enable_cfg = 0x0; rule_mask = 0x3FFF; break; case QED_PTP_FILTER_ALL: enable_cfg = 0x7; rule_mask = 0x3CAA; break; case QED_PTP_FILTER_V1_L4_EVENT: enable_cfg = 0x3; rule_mask = 0x3FFA; break; case QED_PTP_FILTER_V1_L4_GEN: enable_cfg = 0x3; rule_mask = 0x3FFE; break; case QED_PTP_FILTER_V2_L4_EVENT: enable_cfg = 0x5; rule_mask = 0x3FAA; break; case QED_PTP_FILTER_V2_L4_GEN: enable_cfg = 0x5; rule_mask = 0x3FEE; break; case QED_PTP_FILTER_V2_L2_EVENT: enable_cfg = 0x5; rule_mask = 0x3CFF; break; case QED_PTP_FILTER_V2_L2_GEN: enable_cfg = 0x5; rule_mask = 0x3EFF; break; case QED_PTP_FILTER_V2_EVENT: enable_cfg = 0x5; rule_mask = 0x3CAA; break; case QED_PTP_FILTER_V2_GEN: enable_cfg = 0x5; rule_mask = 0x3EEE; break; default: DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", rx_type); return -EINVAL; } qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, QED_PTP_UCAST_PARAM_MASK); qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask); qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, enable_cfg); if (tx_type == QED_PTP_HWTSTAMP_TX_OFF) { qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0); qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF); qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF); } else { qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, enable_cfg); qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, QED_PTP_UCAST_PARAM_MASK); qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, rule_mask); } /* Reset possibly old timestamps */ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID, QED_TIMESTAMP_MASK); return 0; } /* Adjust the HW clock by a rate given in parts-per-billion (ppb) units. * FW/HW accepts the adjustment value in terms of 3 parameters: * Drift period - adjustment happens once in certain number of nano seconds. * Drift value - time is adjusted by a certain value, for example by 5 ns. * Drift direction - add or subtract the adjustment value. * The routine translates ppb into the adjustment triplet in an optimal manner. 
*/ static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb) { s64 best_val = 0, val, best_period = 0, period, approx_dev, dif, dif2; struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; u32 drift_ctr_cfg = 0, drift_state; int drift_dir = 1; if (ppb < 0) { ppb = -ppb; drift_dir = 0; } if (ppb > 1) { s64 best_dif = ppb, best_approx_dev = 1; /* Adjustment value is up to +/-7ns, find an optimal value in * this range. */ for (val = 7; val > 0; val--) { period = div_s64(val * 1000000000, ppb); period -= 8; period >>= 4; if (period < 1) period = 1; if (period > 0xFFFFFFE) period = 0xFFFFFFE; /* Check both rounding ends for approximate error */ approx_dev = period * 16 + 8; dif = ppb * approx_dev - val * 1000000000; dif2 = dif + 16 * ppb; if (dif < 0) dif = -dif; if (dif2 < 0) dif2 = -dif2; /* Determine which end gives better approximation */ if (dif * (approx_dev + 16) > dif2 * approx_dev) { period++; approx_dev += 16; dif = dif2; } /* Track best approximation found so far */ if (best_dif * approx_dev > dif * best_approx_dev) { best_dif = dif; best_val = val; best_period = period; best_approx_dev = approx_dev; } } } else if (ppb == 1) { /* This is a special case as its the only value which wouldn't * fit in a s64 variable. In order to prevent castings simple * handle it seperately. */ best_val = 4; best_period = 0xee6b27f; } else { best_val = 0; best_period = 0xFFFFFFF; } drift_ctr_cfg = (best_period << QED_DRIFT_CNTR_TIME_QUANTA_SHIFT) | (((int)best_val) << QED_DRIFT_CNTR_ADJUSTMENT_SHIFT) | (((int)drift_dir) << QED_DRIFT_CNTR_DIRECTION_SHIFT); qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x1); drift_state = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR); if (drift_state & 1) { qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, drift_ctr_cfg); } else { DP_INFO(p_hwfn, "Drift counter is not reset\n"); return -EINVAL; } qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0); return 0; } static int qed_ptp_hw_enable(struct qed_dev *cdev) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt; int rc; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) { DP_NOTICE(p_hwfn, "Failed to acquire PTT for PTP\n"); return -EBUSY; } p_hwfn->p_ptp_ptt = p_ptt; rc = qed_ptp_res_lock(p_hwfn, p_ptt); if (rc) { DP_INFO(p_hwfn, "Couldn't acquire the resource lock, skip ptp enable for this PF\n"); qed_ptt_release(p_hwfn, p_ptt); p_hwfn->p_ptp_ptt = NULL; return rc; } /* Reset PTP event detection rules - will be configured in the IOCTL */ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF); qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF); qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF); qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF); qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 7); qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 7); qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1); /* Pause free running counter */ if (QED_IS_BB_B0(p_hwfn->cdev)) qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2); if (QED_IS_AH(p_hwfn->cdev)) qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2); qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0); qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0); /* Resume free running counter */ if (QED_IS_BB_B0(p_hwfn->cdev)) qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4); if (QED_IS_AH(p_hwfn->cdev)) { qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4); qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1); } /* Disable drift register */ 
qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0); qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0); /* Reset possibly old timestamps */ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID, QED_TIMESTAMP_MASK); qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK); return 0; } static int qed_ptp_hw_disable(struct qed_dev *cdev) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; qed_ptp_res_unlock(p_hwfn, p_ptt); /* Reset PTP event detection rules */ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF); qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF); qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF); qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF); /* Disable the PTP feature */ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0); qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0); qed_ptt_release(p_hwfn, p_ptt); p_hwfn->p_ptp_ptt = NULL; return 0; } const struct qed_eth_ptp_ops qed_ptp_ops_pass = { .cfg_filters = qed_ptp_hw_cfg_filters, .read_rx_ts = qed_ptp_hw_read_rx_ts, .read_tx_ts = qed_ptp_hw_read_tx_ts, .read_cc = qed_ptp_hw_read_cc, .adjfreq = qed_ptp_hw_adjfreq, .disable = qed_ptp_hw_disable, .enable = qed_ptp_hw_enable, };
linux-master
drivers/net/ethernet/qlogic/qed/qed_ptp.c
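The row above (linux-master, drivers/net/ethernet/qlogic/qed/qed_ptp.c) is C source despite the column name. Its qed_ptp_hw_adjfreq() turns a parts-per-billion rate request into a drift triplet of period (28 bits of 16 ns quanta), adjustment value (1 to 7 ns) and direction, packed into one drift-counter word via the QED_DRIFT_CNTR_* shifts (0, 28, 31). Below is a minimal userspace sketch of that same search, not driver code: the helper name drift_cfg_for_ppb and the sample ppb are mine, everything else mirrors the loop in the cell above.

/*
 * Standalone sketch, assuming only what qed_ptp.c above states:
 * adjustment value 1..7 ns, 28-bit period in 16 ns quanta, and the
 * QED_DRIFT_CNTR_* shifts 0/28/31.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t drift_cfg_for_ppb(int32_t ppb)   /* hypothetical helper */
{
	int64_t best_val = 0, best_period = 0;
	int drift_dir = 1;                       /* 1 = add, 0 = subtract */

	if (ppb < 0) {
		ppb = -ppb;
		drift_dir = 0;
	}

	if (ppb > 1) {
		int64_t best_dif = ppb, best_approx_dev = 1;
		int64_t val, period, approx_dev, dif, dif2;

		for (val = 7; val > 0; val--) {
			/* Candidate period, in 16 ns quanta, for adding val ns per interval */
			period = (val * 1000000000LL / ppb - 8) >> 4;
			if (period < 1)
				period = 1;
			if (period > 0xFFFFFFE)
				period = 0xFFFFFFE;

			/* Compare rounding the period down vs. up */
			approx_dev = period * 16 + 8;
			dif = ppb * approx_dev - val * 1000000000LL;
			dif2 = dif + 16 * ppb;
			if (dif < 0)
				dif = -dif;
			if (dif2 < 0)
				dif2 = -dif2;
			if (dif * (approx_dev + 16) > dif2 * approx_dev) {
				period++;
				approx_dev += 16;
				dif = dif2;
			}

			/* Keep the (value, period) pair with the smallest relative error */
			if (best_dif * approx_dev > dif * best_approx_dev) {
				best_dif = dif;
				best_val = val;
				best_period = period;
				best_approx_dev = approx_dev;
			}
		}
	} else if (ppb == 1) {
		best_val = 4;                    /* special case, as in the driver */
		best_period = 0xee6b27f;
	} else {
		best_val = 0;                    /* ppb == 0: no drift adjustment */
		best_period = 0xFFFFFFF;
	}

	return (uint32_t)(best_period |                  /* TIME_QUANTA, shift 0  */
			  (best_val << 28) |             /* ADJUSTMENT,  shift 28 */
			  ((int64_t)drift_dir << 31));   /* DIRECTION,   shift 31 */
}

int main(void)
{
	int32_t ppb = 12345;                     /* example rate, made up */
	uint32_t cfg = drift_cfg_for_ppb(ppb);

	printf("ppb %d -> cfg 0x%08x: period %u, value %u, dir %u\n",
	       (int)ppb, (unsigned)cfg, (unsigned)(cfg & 0xFFFFFFF),
	       (unsigned)((cfg >> 28) & 0x7), (unsigned)(cfg >> 31));
	return 0;
}

Compiling and running it shows how a given ppb request maps onto the period/value/direction fields before the driver writes them to NIG_REG_TSGEN_DRIFT_CNTR_CONF.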
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/types.h> #include <asm/byteorder.h> #include <linux/bitops.h> #include <linux/dcbnl.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #include "qed.h" #include "qed_cxt.h" #include "qed_dcbx.h" #include "qed_hsi.h" #include "qed_sp.h" #include "qed_sriov.h" #include "qed_rdma.h" #ifdef CONFIG_DCB #include <linux/qed/qed_eth_if.h> #endif #define QED_DCBX_MAX_MIB_READ_TRY (100) #define QED_ETH_TYPE_DEFAULT (0) #define QED_ETH_TYPE_ROCE (0x8915) #define QED_UDP_PORT_TYPE_ROCE_V2 (0x12B7) #define QED_ETH_TYPE_FCOE (0x8906) #define QED_TCP_PORT_ISCSI (0xCBC) #define QED_DCBX_INVALID_PRIORITY 0xFF /* Get Traffic Class from priority traffic class table, 4 bits represent * the traffic class corresponding to the priority. */ #define QED_DCBX_PRIO2TC(prio_tc_tbl, prio) \ ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7) static const struct qed_dcbx_app_metadata qed_dcbx_app_update[] = { {DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_ISCSI}, {DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_FCOE}, {DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_ETH_ROCE}, {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_ETH_ROCE}, {DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH}, }; static bool qed_dcbx_app_ethtype(u32 app_info_bitmap) { return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == DCBX_APP_SF_ETHTYPE); } static bool qed_dcbx_ieee_app_ethtype(u32 app_info_bitmap) { u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); /* Old MFW */ if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) return qed_dcbx_app_ethtype(app_info_bitmap); return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE); } static bool qed_dcbx_app_port(u32 app_info_bitmap) { return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == DCBX_APP_SF_PORT); } static bool qed_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type) { u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); /* Old MFW */ if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) return qed_dcbx_app_port(app_info_bitmap); return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT); } static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { bool ethtype; if (ieee) ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); else ethtype = qed_dcbx_app_ethtype(app_info_bitmap); return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT)); } static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { bool port; if (ieee) port = qed_dcbx_ieee_app_port(app_info_bitmap, DCBX_APP_SF_IEEE_TCP_PORT); else port = qed_dcbx_app_port(app_info_bitmap); return !!(port && (proto_id == QED_TCP_PORT_ISCSI)); } static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { bool ethtype; if (ieee) ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); else ethtype = qed_dcbx_app_ethtype(app_info_bitmap); return !!(ethtype && (proto_id == QED_ETH_TYPE_FCOE)); } static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { bool ethtype; if (ieee) ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); else ethtype = qed_dcbx_app_ethtype(app_info_bitmap); return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE)); } static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { bool port; if (ieee) port = qed_dcbx_ieee_app_port(app_info_bitmap, DCBX_APP_SF_IEEE_UDP_PORT); else port = qed_dcbx_app_port(app_info_bitmap); 
return !!(port && (proto_id == QED_UDP_PORT_TYPE_ROCE_V2)); } static void qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data) { enum dcbx_protocol_type id; int i; DP_VERBOSE(p_hwfn, QED_MSG_DCB, "DCBX negotiated: %d\n", p_data->dcbx_enabled); for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) { id = qed_dcbx_app_update[i].id; DP_VERBOSE(p_hwfn, QED_MSG_DCB, "%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n", qed_dcbx_app_update[i].name, p_data->arr[id].update, p_data->arr[id].enable, p_data->arr[id].priority, p_data->arr[id].tc, p_hwfn->hw_info.num_active_tc); } } static void qed_dcbx_set_params(struct qed_dcbx_results *p_data, struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool app_tlv, bool enable, u8 prio, u8 tc, enum dcbx_protocol_type type, enum qed_pci_personality personality) { /* PF update ramrod data */ p_data->arr[type].enable = enable; p_data->arr[type].priority = prio; p_data->arr[type].tc = tc; if (enable) p_data->arr[type].update = UPDATE_DCB; else p_data->arr[type].update = DONT_UPDATE_DCB_DSCP; if (test_bit(QED_MF_DONT_ADD_VLAN0_TAG, &p_hwfn->cdev->mf_bits)) p_data->arr[type].dont_add_vlan0 = true; /* QM reconf data */ if (app_tlv && p_hwfn->hw_info.personality == personality) qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc); /* Configure dcbx vlan priority in doorbell block for roce EDPM */ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && type == DCBX_PROTOCOL_ROCE) { qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1); qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1); } } /* Update app protocol data and hw_info fields with the TLV info */ static void qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool app_tlv, bool enable, u8 prio, u8 tc, enum dcbx_protocol_type type) { enum qed_pci_personality personality; enum dcbx_protocol_type id; int i; for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) { id = qed_dcbx_app_update[i].id; if (type != id) continue; personality = qed_dcbx_app_update[i].personality; qed_dcbx_set_params(p_data, p_hwfn, p_ptt, app_tlv, enable, prio, tc, type, personality); } } static bool qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, u32 app_prio_bitmap, u16 id, enum dcbx_protocol_type *type, bool ieee) { if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_FCOE; } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ROCE; } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ISCSI; } else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ETH; } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ROCE_V2; } else { *type = DCBX_MAX_PROTOCOL_TYPE; DP_VERBOSE(p_hwfn, QED_MSG_DCB, "No action required, App TLV entry = 0x%x\n", app_prio_bitmap); return false; } return true; } /* Parse app TLV's to update TC information in hw_info structure for * reconfiguring QM. Get protocol specific data for PF update ramrod command. 
*/ static int qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_dcbx_results *p_data, struct dcbx_app_priority_entry *p_tbl, u32 pri_tc_tbl, int count, u8 dcbx_version) { enum dcbx_protocol_type type; bool enable, ieee, eth_tlv; u8 tc, priority_map; u16 protocol_id; int priority; int i; DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE); eth_tlv = false; /* Parse APP TLV */ for (i = 0; i < count; i++) { protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PROTOCOL_ID); priority_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP); priority = ffs(priority_map) - 1; if (priority < 0) { DP_ERR(p_hwfn, "Invalid priority\n"); return -EINVAL; } tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority); if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, protocol_id, &type, ieee)) { /* ETH always have the enable bit reset, as it gets * vlan information per packet. For other protocols, * should be set according to the dcbx_enabled * indication, but we only got here if there was an * app tlv for the protocol, so dcbx must be enabled. */ if (type == DCBX_PROTOCOL_ETH) { enable = false; eth_tlv = true; } else { enable = true; } qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, true, enable, priority, tc, type); } } /* If Eth TLV is not detected, use UFP TC as default TC */ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && !eth_tlv) p_data->arr[DCBX_PROTOCOL_ETH].tc = p_hwfn->ufp_info.tc; /* Update ramrod protocol data and hw_info fields * with default info when corresponding APP TLV's are not detected. * The enabled field has a different logic for ethernet as only for * ethernet dcb should disabled by default, as the information arrives * from the OS (unless an explicit app tlv was present). */ tc = p_data->arr[DCBX_PROTOCOL_ETH].tc; priority = p_data->arr[DCBX_PROTOCOL_ETH].priority; for (type = 0; type < DCBX_MAX_PROTOCOL_TYPE; type++) { if (p_data->arr[type].update) continue; enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version; qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, false, enable, priority, tc, type); } return 0; } /* Parse app TLV's to update TC information in hw_info structure for * reconfiguring QM. Get protocol specific data for PF update ramrod command. 
*/ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct dcbx_app_priority_feature *p_app; struct dcbx_app_priority_entry *p_tbl; struct qed_dcbx_results data = { 0 }; struct dcbx_ets_feature *p_ets; struct qed_hw_info *p_info; u32 pri_tc_tbl, flags; u8 dcbx_version; int num_entries; int rc = 0; flags = p_hwfn->p_dcbx_info->operational.flags; dcbx_version = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); p_app = &p_hwfn->p_dcbx_info->operational.features.app; p_tbl = p_app->app_pri_tbl; p_ets = &p_hwfn->p_dcbx_info->operational.features.ets; pri_tc_tbl = p_ets->pri_tc_tbl[0]; p_info = &p_hwfn->hw_info; num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); rc = qed_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl, num_entries, dcbx_version); if (rc) return rc; p_info->num_active_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); p_hwfn->qm_info.ooo_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_OOO_TC); data.pf_id = p_hwfn->rel_pf_id; data.dcbx_enabled = !!dcbx_version; qed_dcbx_dp_protocol(p_hwfn, &data); memcpy(&p_hwfn->p_dcbx_info->results, &data, sizeof(struct qed_dcbx_results)); return 0; } static int qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_dcbx_mib_meta_data *p_data, enum qed_mib_read_type type) { u32 prefix_seq_num, suffix_seq_num; int read_count = 0; int rc = 0; /* The data is considered to be valid only if both sequence numbers are * the same. */ do { if (type == QED_DCBX_REMOTE_LLDP_MIB) { qed_memcpy_from(p_hwfn, p_ptt, p_data->lldp_remote, p_data->addr, p_data->size); prefix_seq_num = p_data->lldp_remote->prefix_seq_num; suffix_seq_num = p_data->lldp_remote->suffix_seq_num; } else { qed_memcpy_from(p_hwfn, p_ptt, p_data->mib, p_data->addr, p_data->size); prefix_seq_num = p_data->mib->prefix_seq_num; suffix_seq_num = p_data->mib->suffix_seq_num; } read_count++; DP_VERBOSE(p_hwfn, QED_MSG_DCB, "mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n", type, read_count, prefix_seq_num, suffix_seq_num); } while ((prefix_seq_num != suffix_seq_num) && (read_count < QED_DCBX_MAX_MIB_READ_TRY)); if (read_count >= QED_DCBX_MAX_MIB_READ_TRY) { DP_ERR(p_hwfn, "MIB read err, mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n", type, read_count, prefix_seq_num, suffix_seq_num); rc = -EIO; } return rc; } static void qed_dcbx_get_priority_info(struct qed_hwfn *p_hwfn, struct qed_dcbx_app_prio *p_prio, struct qed_dcbx_results *p_results) { u8 val; p_prio->roce = QED_DCBX_INVALID_PRIORITY; p_prio->roce_v2 = QED_DCBX_INVALID_PRIORITY; p_prio->iscsi = QED_DCBX_INVALID_PRIORITY; p_prio->fcoe = QED_DCBX_INVALID_PRIORITY; if (p_results->arr[DCBX_PROTOCOL_ROCE].update && p_results->arr[DCBX_PROTOCOL_ROCE].enable) p_prio->roce = p_results->arr[DCBX_PROTOCOL_ROCE].priority; if (p_results->arr[DCBX_PROTOCOL_ROCE_V2].update && p_results->arr[DCBX_PROTOCOL_ROCE_V2].enable) { val = p_results->arr[DCBX_PROTOCOL_ROCE_V2].priority; p_prio->roce_v2 = val; } if (p_results->arr[DCBX_PROTOCOL_ISCSI].update && p_results->arr[DCBX_PROTOCOL_ISCSI].enable) p_prio->iscsi = p_results->arr[DCBX_PROTOCOL_ISCSI].priority; if (p_results->arr[DCBX_PROTOCOL_FCOE].update && p_results->arr[DCBX_PROTOCOL_FCOE].enable) p_prio->fcoe = p_results->arr[DCBX_PROTOCOL_FCOE].priority; if (p_results->arr[DCBX_PROTOCOL_ETH].update && p_results->arr[DCBX_PROTOCOL_ETH].enable) p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority; DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Priorities: iscsi %d, roce %d, roce v2 %d, 
fcoe %d, eth %d\n", p_prio->iscsi, p_prio->roce, p_prio->roce_v2, p_prio->fcoe, p_prio->eth); } static void qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, struct dcbx_app_priority_feature *p_app, struct dcbx_app_priority_entry *p_tbl, struct qed_dcbx_params *p_params, bool ieee) { struct qed_app_entry *entry; u8 pri_map; int i; p_params->app_willing = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_WILLING); p_params->app_valid = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ENABLED); p_params->app_error = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR); p_params->num_app_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { entry = &p_params->app_entry[i]; if (ieee) { u8 sf_ieee; u32 val; sf_ieee = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_SF_IEEE); switch (sf_ieee) { case DCBX_APP_SF_IEEE_RESERVED: /* Old MFW */ val = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_SF); entry->sf_ieee = val ? QED_DCBX_SF_IEEE_TCP_UDP_PORT : QED_DCBX_SF_IEEE_ETHTYPE; break; case DCBX_APP_SF_IEEE_ETHTYPE: entry->sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE; break; case DCBX_APP_SF_IEEE_TCP_PORT: entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT; break; case DCBX_APP_SF_IEEE_UDP_PORT: entry->sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT; break; case DCBX_APP_SF_IEEE_TCP_UDP_PORT: entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT; break; } } else { entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_SF)); } pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP); entry->prio = ffs(pri_map) - 1; entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PROTOCOL_ID); qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, entry->proto_id, &entry->proto_type, ieee); } DP_VERBOSE(p_hwfn, QED_MSG_DCB, "APP params: willing %d, valid %d error = %d\n", p_params->app_willing, p_params->app_valid, p_params->app_error); } static void qed_dcbx_get_pfc_data(struct qed_hwfn *p_hwfn, u32 pfc, struct qed_dcbx_params *p_params) { u8 pfc_map; p_params->pfc.willing = QED_MFW_GET_FIELD(pfc, DCBX_PFC_WILLING); p_params->pfc.max_tc = QED_MFW_GET_FIELD(pfc, DCBX_PFC_CAPS); p_params->pfc.enabled = QED_MFW_GET_FIELD(pfc, DCBX_PFC_ENABLED); pfc_map = QED_MFW_GET_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP); p_params->pfc.prio[0] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_0); p_params->pfc.prio[1] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_1); p_params->pfc.prio[2] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_2); p_params->pfc.prio[3] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_3); p_params->pfc.prio[4] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_4); p_params->pfc.prio[5] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_5); p_params->pfc.prio[6] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_6); p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7); DP_VERBOSE(p_hwfn, QED_MSG_DCB, "PFC params: willing %d, pfc_bitmap %u max_tc = %u enabled = %d\n", p_params->pfc.willing, pfc_map, p_params->pfc.max_tc, p_params->pfc.enabled); } static void qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn, struct dcbx_ets_feature *p_ets, struct qed_dcbx_params *p_params) { __be32 bw_map[2], tsa_map[2]; u32 pri_map; int i; p_params->ets_willing = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_WILLING); p_params->ets_enabled = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_ENABLED); p_params->ets_cbs = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_CBS); p_params->max_ets_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); DP_VERBOSE(p_hwfn, QED_MSG_DCB, "ETS params: willing %d, enabled = %d ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n", p_params->ets_willing, 
p_params->ets_enabled, p_params->ets_cbs, p_ets->pri_tc_tbl[0], p_params->max_ets_tc); if (p_params->ets_enabled && !p_params->max_ets_tc) { p_params->max_ets_tc = QED_MAX_PFC_PRIORITIES; DP_VERBOSE(p_hwfn, QED_MSG_DCB, "ETS params: max_ets_tc is forced to %d\n", p_params->max_ets_tc); } /* 8 bit tsa and bw data corresponding to each of the 8 TC's are * encoded in a type u32 array of size 2. */ cpu_to_be32_array(bw_map, p_ets->tc_bw_tbl, 2); cpu_to_be32_array(tsa_map, p_ets->tc_tsa_tbl, 2); pri_map = p_ets->pri_tc_tbl[0]; for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i]; p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i]; p_params->ets_pri_tc_tbl[i] = QED_DCBX_PRIO2TC(pri_map, i); DP_VERBOSE(p_hwfn, QED_MSG_DCB, "elem %d bw_tbl %x tsa_tbl %x\n", i, p_params->ets_tc_bw_tbl[i], p_params->ets_tc_tsa_tbl[i]); } } static void qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn, struct dcbx_app_priority_feature *p_app, struct dcbx_app_priority_entry *p_tbl, struct dcbx_ets_feature *p_ets, u32 pfc, struct qed_dcbx_params *p_params, bool ieee) { qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee); qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params); qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params); } static void qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *params) { struct dcbx_features *p_feat; p_feat = &p_hwfn->p_dcbx_info->local_admin.features; qed_dcbx_get_common_params(p_hwfn, &p_feat->app, p_feat->app.app_pri_tbl, &p_feat->ets, p_feat->pfc, &params->local.params, false); params->local.valid = true; } static void qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *params) { struct dcbx_features *p_feat; p_feat = &p_hwfn->p_dcbx_info->remote.features; qed_dcbx_get_common_params(p_hwfn, &p_feat->app, p_feat->app.app_pri_tbl, &p_feat->ets, p_feat->pfc, &params->remote.params, false); params->remote.valid = true; } static void qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *params) { struct qed_dcbx_operational_params *p_operational; struct qed_dcbx_results *p_results; struct dcbx_features *p_feat; bool enabled, err; u32 flags; bool val; flags = p_hwfn->p_dcbx_info->operational.flags; /* If DCBx version is non zero, then negotiation * was successfuly performed */ p_operational = &params->operational; enabled = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) != DCBX_CONFIG_VERSION_DISABLED); if (!enabled) { p_operational->enabled = enabled; p_operational->valid = false; DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Dcbx is disabled\n"); return; } p_feat = &p_hwfn->p_dcbx_info->operational.features; p_results = &p_hwfn->p_dcbx_info->results; val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) == DCBX_CONFIG_VERSION_IEEE); p_operational->ieee = val; val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) == DCBX_CONFIG_VERSION_CEE); p_operational->cee = val; val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) == DCBX_CONFIG_VERSION_STATIC); p_operational->local = val; DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Version support: ieee %d, cee %d, static %d\n", p_operational->ieee, p_operational->cee, p_operational->local); qed_dcbx_get_common_params(p_hwfn, &p_feat->app, p_feat->app.app_pri_tbl, &p_feat->ets, p_feat->pfc, &params->operational.params, p_operational->ieee); qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results); err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR); p_operational->err = err; p_operational->enabled = enabled; p_operational->valid = true; } static 
void qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *params) { struct lldp_config_params_s *p_local; p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE]; memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id, sizeof(p_local->local_chassis_id)); memcpy(params->lldp_local.local_port_id, p_local->local_port_id, sizeof(p_local->local_port_id)); } static void qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *params) { struct lldp_status_params_s *p_remote; p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE]; memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id, sizeof(p_remote->peer_chassis_id)); memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id, sizeof(p_remote->peer_port_id)); } static int qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *p_params, enum qed_mib_read_type type) { switch (type) { case QED_DCBX_REMOTE_MIB: qed_dcbx_get_remote_params(p_hwfn, p_params); break; case QED_DCBX_LOCAL_MIB: qed_dcbx_get_local_params(p_hwfn, p_params); break; case QED_DCBX_OPERATIONAL_MIB: qed_dcbx_get_operational_params(p_hwfn, p_params); break; case QED_DCBX_REMOTE_LLDP_MIB: qed_dcbx_get_remote_lldp_params(p_hwfn, p_params); break; case QED_DCBX_LOCAL_LLDP_MIB: qed_dcbx_get_local_lldp_params(p_hwfn, p_params); break; default: DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type); return -EINVAL; } return 0; } static int qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_dcbx_mib_meta_data data; memset(&data, 0, sizeof(data)); data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port, lldp_config_params); data.lldp_local = p_hwfn->p_dcbx_info->lldp_local; data.size = sizeof(struct lldp_config_params_s); qed_memcpy_from(p_hwfn, p_ptt, data.lldp_local, data.addr, data.size); return 0; } static int qed_dcbx_read_remote_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_mib_read_type type) { struct qed_dcbx_mib_meta_data data; int rc = 0; memset(&data, 0, sizeof(data)); data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port, lldp_status_params); data.lldp_remote = p_hwfn->p_dcbx_info->lldp_remote; data.size = sizeof(struct lldp_status_params_s); rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type); return rc; } static int qed_dcbx_read_operational_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_mib_read_type type) { struct qed_dcbx_mib_meta_data data; int rc = 0; memset(&data, 0, sizeof(data)); data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port, operational_dcbx_mib); data.mib = &p_hwfn->p_dcbx_info->operational; data.size = sizeof(struct dcbx_mib); rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type); return rc; } static int qed_dcbx_read_remote_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_mib_read_type type) { struct qed_dcbx_mib_meta_data data; int rc = 0; memset(&data, 0, sizeof(data)); data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port, remote_dcbx_mib); data.mib = &p_hwfn->p_dcbx_info->remote; data.size = sizeof(struct dcbx_mib); rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type); return rc; } static int qed_dcbx_read_local_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_dcbx_mib_meta_data data; memset(&data, 0, sizeof(data)); data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port, local_admin_dcbx_mib); data.local_admin = &p_hwfn->p_dcbx_info->local_admin; data.size = 
sizeof(struct dcbx_local_params); qed_memcpy_from(p_hwfn, p_ptt, data.local_admin, data.addr, data.size); return 0; } static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_mib_read_type type) { int rc = -EINVAL; switch (type) { case QED_DCBX_OPERATIONAL_MIB: rc = qed_dcbx_read_operational_mib(p_hwfn, p_ptt, type); break; case QED_DCBX_REMOTE_MIB: rc = qed_dcbx_read_remote_mib(p_hwfn, p_ptt, type); break; case QED_DCBX_LOCAL_MIB: rc = qed_dcbx_read_local_mib(p_hwfn, p_ptt); break; case QED_DCBX_REMOTE_LLDP_MIB: rc = qed_dcbx_read_remote_lldp_mib(p_hwfn, p_ptt, type); break; case QED_DCBX_LOCAL_LLDP_MIB: rc = qed_dcbx_read_local_lldp_mib(p_hwfn, p_ptt); break; default: DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type); } return rc; } static void qed_dcbx_aen(struct qed_hwfn *hwfn, u32 mib_type) { struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; void *cookie = hwfn->cdev->ops_cookie; if (cookie && op->dcbx_aen) op->dcbx_aen(cookie, &hwfn->p_dcbx_info->get, mib_type); } /* Read updated MIB. * Reconfigure QM and invoke PF update ramrod command if operational MIB * change is detected. */ int qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_mib_read_type type) { int rc = 0; rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type); if (rc) return rc; if (type == QED_DCBX_OPERATIONAL_MIB) { rc = qed_dcbx_process_mib_info(p_hwfn, p_ptt); if (!rc) { /* reconfigure tcs of QM queues according * to negotiation results */ qed_qm_reconf(p_hwfn, p_ptt); /* update storm FW with negotiation results */ qed_sp_pf_update(p_hwfn); /* for roce PFs, we may want to enable/disable DPM * when DCBx change occurs */ if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) qed_roce_dpm_dcbx(p_hwfn, p_ptt); } } qed_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type); if (type == QED_DCBX_OPERATIONAL_MIB) { struct qed_dcbx_results *p_data; u16 val; /* Configure in NIG which protocols support EDPM and should * honor PFC. 
*/ p_data = &p_hwfn->p_dcbx_info->results; val = (0x1 << p_data->arr[DCBX_PROTOCOL_ROCE].tc) | (0x1 << p_data->arr[DCBX_PROTOCOL_ROCE_V2].tc); val <<= NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT; val |= NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN; qed_wr(p_hwfn, p_ptt, NIG_REG_TX_EDPM_CTRL, val); } qed_dcbx_aen(p_hwfn, type); return rc; } int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn) { p_hwfn->p_dcbx_info = kzalloc(sizeof(*p_hwfn->p_dcbx_info), GFP_KERNEL); if (!p_hwfn->p_dcbx_info) return -ENOMEM; return 0; } void qed_dcbx_info_free(struct qed_hwfn *p_hwfn) { kfree(p_hwfn->p_dcbx_info); p_hwfn->p_dcbx_info = NULL; } static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data, struct qed_dcbx_results *p_src, enum dcbx_protocol_type type) { p_data->dcb_enable_flag = p_src->arr[type].enable; p_data->dcb_priority = p_src->arr[type].priority; p_data->dcb_tc = p_src->arr[type].tc; p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0; } /* Set pf update ramrod command params */ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src, struct pf_update_ramrod_data *p_dest) { struct protocol_dcb_data *p_dcb_data; u8 update_flag; update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update; p_dest->update_fcoe_dcb_data_mode = update_flag; update_flag = p_src->arr[DCBX_PROTOCOL_ROCE].update; p_dest->update_roce_dcb_data_mode = update_flag; update_flag = p_src->arr[DCBX_PROTOCOL_ROCE_V2].update; p_dest->update_rroce_dcb_data_mode = update_flag; update_flag = p_src->arr[DCBX_PROTOCOL_ISCSI].update; p_dest->update_iscsi_dcb_data_mode = update_flag; update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update; p_dest->update_eth_dcb_data_mode = update_flag; p_dcb_data = &p_dest->fcoe_dcb_data; qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE); p_dcb_data = &p_dest->roce_dcb_data; qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ROCE); p_dcb_data = &p_dest->rroce_dcb_data; qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ROCE_V2); p_dcb_data = &p_dest->iscsi_dcb_data; qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ISCSI); p_dcb_data = &p_dest->eth_dcb_data; qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH); } u8 qed_dcbx_get_priority_tc(struct qed_hwfn *p_hwfn, u8 pri) { struct qed_dcbx_get *dcbx_info = &p_hwfn->p_dcbx_info->get; if (pri >= QED_MAX_PFC_PRIORITIES) { DP_ERR(p_hwfn, "Invalid priority %d\n", pri); return QED_DCBX_DEFAULT_TC; } if (!dcbx_info->operational.valid) { DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Dcbx parameters not available\n"); return QED_DCBX_DEFAULT_TC; } return dcbx_info->operational.params.ets_pri_tc_tbl[pri]; } #ifdef CONFIG_DCB static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *p_get, enum qed_mib_read_type type) { struct qed_ptt *p_ptt; int rc; if (IS_VF(p_hwfn->cdev)) return -EINVAL; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) return -EBUSY; rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type); if (rc) goto out; rc = qed_dcbx_get_params(p_hwfn, p_get, type); out: qed_ptt_release(p_hwfn, p_ptt); return rc; } static void qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn, u32 *pfc, struct qed_dcbx_params *p_params) { u8 pfc_map = 0; int i; *pfc &= ~DCBX_PFC_ERROR_MASK; if (p_params->pfc.willing) *pfc |= DCBX_PFC_WILLING_MASK; else *pfc &= ~DCBX_PFC_WILLING_MASK; if (p_params->pfc.enabled) *pfc |= DCBX_PFC_ENABLED_MASK; else *pfc &= ~DCBX_PFC_ENABLED_MASK; *pfc &= ~DCBX_PFC_CAPS_MASK; *pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_SHIFT; for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) if 
(p_params->pfc.prio[i]) pfc_map |= BIT(i); *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK; *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT); DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc); } static void qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn, struct dcbx_ets_feature *p_ets, struct qed_dcbx_params *p_params) { __be32 bw_map[2], tsa_map[2]; u32 val; int i; if (p_params->ets_willing) p_ets->flags |= DCBX_ETS_WILLING_MASK; else p_ets->flags &= ~DCBX_ETS_WILLING_MASK; if (p_params->ets_cbs) p_ets->flags |= DCBX_ETS_CBS_MASK; else p_ets->flags &= ~DCBX_ETS_CBS_MASK; if (p_params->ets_enabled) p_ets->flags |= DCBX_ETS_ENABLED_MASK; else p_ets->flags &= ~DCBX_ETS_ENABLED_MASK; p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK; p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_SHIFT; p_ets->pri_tc_tbl[0] = 0; for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { ((u8 *)bw_map)[i] = p_params->ets_tc_bw_tbl[i]; ((u8 *)tsa_map)[i] = p_params->ets_tc_tsa_tbl[i]; /* Copy the priority value to the corresponding 4 bits in the * traffic class table. */ val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4)); p_ets->pri_tc_tbl[0] |= val; } be32_to_cpu_array(p_ets->tc_bw_tbl, bw_map, 2); be32_to_cpu_array(p_ets->tc_tsa_tbl, tsa_map, 2); } static void qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn, struct dcbx_app_priority_feature *p_app, struct qed_dcbx_params *p_params, bool ieee) { u32 *entry; int i; if (p_params->app_willing) p_app->flags |= DCBX_APP_WILLING_MASK; else p_app->flags &= ~DCBX_APP_WILLING_MASK; if (p_params->app_valid) p_app->flags |= DCBX_APP_ENABLED_MASK; else p_app->flags &= ~DCBX_APP_ENABLED_MASK; p_app->flags &= ~DCBX_APP_NUM_ENTRIES_MASK; p_app->flags |= (u32)p_params->num_app_entries << DCBX_APP_NUM_ENTRIES_SHIFT; for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { entry = &p_app->app_pri_tbl[i].entry; *entry = 0; if (ieee) { *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK); switch (p_params->app_entry[i].sf_ieee) { case QED_DCBX_SF_IEEE_ETHTYPE: *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE << DCBX_APP_SF_IEEE_SHIFT); *entry |= ((u32)DCBX_APP_SF_ETHTYPE << DCBX_APP_SF_SHIFT); break; case QED_DCBX_SF_IEEE_TCP_PORT: *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT << DCBX_APP_SF_IEEE_SHIFT); *entry |= ((u32)DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT); break; case QED_DCBX_SF_IEEE_UDP_PORT: *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT << DCBX_APP_SF_IEEE_SHIFT); *entry |= ((u32)DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT); break; case QED_DCBX_SF_IEEE_TCP_UDP_PORT: *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT << DCBX_APP_SF_IEEE_SHIFT); *entry |= ((u32)DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT); break; } } else { *entry &= ~DCBX_APP_SF_MASK; if (p_params->app_entry[i].ethtype) *entry |= ((u32)DCBX_APP_SF_ETHTYPE << DCBX_APP_SF_SHIFT); else *entry |= ((u32)DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT); } *entry &= ~DCBX_APP_PROTOCOL_ID_MASK; *entry |= ((u32)p_params->app_entry[i].proto_id << DCBX_APP_PROTOCOL_ID_SHIFT); *entry &= ~DCBX_APP_PRI_MAP_MASK; *entry |= ((u32)(p_params->app_entry[i].prio) << DCBX_APP_PRI_MAP_SHIFT); } } static void qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn, struct dcbx_local_params *local_admin, struct qed_dcbx_set *params) { bool ieee = false; local_admin->flags = 0; memcpy(&local_admin->features, &p_hwfn->p_dcbx_info->operational.features, sizeof(local_admin->features)); if (params->enabled) { local_admin->config = params->ver_num; ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE); } else { local_admin->config = DCBX_CONFIG_VERSION_DISABLED; } DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Dcbx 
version = %d\n", local_admin->config); if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG) qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc, &params->config.params); if (params->override_flags & QED_DCBX_OVERRIDE_ETS_CFG) qed_dcbx_set_ets_data(p_hwfn, &local_admin->features.ets, &params->config.params); if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG) qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app, &params->config.params, ieee); } int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_dcbx_set *params, bool hw_commit) { struct dcbx_local_params local_admin; struct qed_dcbx_mib_meta_data data; u32 resp = 0, param = 0; int rc = 0; if (!hw_commit) { memcpy(&p_hwfn->p_dcbx_info->set, params, sizeof(struct qed_dcbx_set)); return 0; } /* clear set-parmas cache */ memset(&p_hwfn->p_dcbx_info->set, 0, sizeof(p_hwfn->p_dcbx_info->set)); memset(&local_admin, 0, sizeof(local_admin)); qed_dcbx_set_local_params(p_hwfn, &local_admin, params); data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port, local_admin_dcbx_mib); data.local_admin = &local_admin; data.size = sizeof(struct dcbx_local_params); qed_memcpy_to(p_hwfn, p_ptt, data.addr, data.local_admin, data.size); rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_DCBX, 1 << DRV_MB_PARAM_LLDP_SEND_SHIFT, &resp, &param); if (rc) DP_NOTICE(p_hwfn, "Failed to send DCBX update request\n"); return rc; } int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_set *params) { struct qed_dcbx_get *dcbx_info; int rc; if (p_hwfn->p_dcbx_info->set.config.valid) { memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set)); return 0; } dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL); if (!dcbx_info) return -ENOMEM; rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB); if (rc) { kfree(dcbx_info); return rc; } p_hwfn->p_dcbx_info->set.override_flags = 0; p_hwfn->p_dcbx_info->set.ver_num = DCBX_CONFIG_VERSION_DISABLED; if (dcbx_info->operational.cee) p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_CEE; if (dcbx_info->operational.ieee) p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE; if (dcbx_info->operational.local) p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC; p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled; BUILD_BUG_ON(sizeof(dcbx_info->operational.params) != sizeof(p_hwfn->p_dcbx_info->set.config.params)); memcpy(&p_hwfn->p_dcbx_info->set.config.params, &dcbx_info->operational.params, sizeof(p_hwfn->p_dcbx_info->set.config.params)); p_hwfn->p_dcbx_info->set.config.valid = true; memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set)); kfree(dcbx_info); return 0; } static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn, enum qed_mib_read_type type) { struct qed_dcbx_get *dcbx_info; dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_ATOMIC); if (!dcbx_info) return NULL; if (qed_dcbx_query_params(hwfn, dcbx_info, type)) { kfree(dcbx_info); return NULL; } if ((type == QED_DCBX_OPERATIONAL_MIB) && !dcbx_info->operational.enabled) { DP_INFO(hwfn, "DCBX is not enabled/operational\n"); kfree(dcbx_info); return NULL; } return dcbx_info; } static u8 qed_dcbnl_getstate(struct qed_dev *cdev) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_get *dcbx_info; bool enabled; dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); if (!dcbx_info) return 0; enabled = dcbx_info->operational.enabled; DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", enabled); 
kfree(dcbx_info); return enabled; } static u8 qed_dcbnl_setstate(struct qed_dev *cdev, u8 state) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_set dcbx_set; struct qed_ptt *ptt; int rc; DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", state); memset(&dcbx_set, 0, sizeof(dcbx_set)); rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); if (rc) return 1; dcbx_set.enabled = !!state; ptt = qed_ptt_acquire(hwfn); if (!ptt) return 1; rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); qed_ptt_release(hwfn, ptt); return rc ? 1 : 0; } static void qed_dcbnl_getpgtccfgtx(struct qed_dev *cdev, int tc, u8 *prio_type, u8 *pgid, u8 *bw_pct, u8 *up_map) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_get *dcbx_info; DP_VERBOSE(hwfn, QED_MSG_DCB, "tc = %d\n", tc); *prio_type = *pgid = *bw_pct = *up_map = 0; if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) { DP_INFO(hwfn, "Invalid tc %d\n", tc); return; } dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); if (!dcbx_info) return; *pgid = dcbx_info->operational.params.ets_pri_tc_tbl[tc]; kfree(dcbx_info); } static void qed_dcbnl_getpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 *bw_pct) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_get *dcbx_info; *bw_pct = 0; DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d\n", pgid); if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) { DP_INFO(hwfn, "Invalid pgid %d\n", pgid); return; } dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); if (!dcbx_info) return; *bw_pct = dcbx_info->operational.params.ets_tc_bw_tbl[pgid]; DP_VERBOSE(hwfn, QED_MSG_DCB, "bw_pct = %d\n", *bw_pct); kfree(dcbx_info); } static void qed_dcbnl_getpgtccfgrx(struct qed_dev *cdev, int tc, u8 *prio, u8 *bwg_id, u8 *bw_pct, u8 *up_map) { DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n"); *prio = *bwg_id = *bw_pct = *up_map = 0; } static void qed_dcbnl_getpgbwgcfgrx(struct qed_dev *cdev, int bwg_id, u8 *bw_pct) { DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n"); *bw_pct = 0; } static void qed_dcbnl_getpfccfg(struct qed_dev *cdev, int priority, u8 *setting) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_get *dcbx_info; DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d\n", priority); if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) { DP_INFO(hwfn, "Invalid priority %d\n", priority); return; } dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); if (!dcbx_info) return; *setting = dcbx_info->operational.params.pfc.prio[priority]; DP_VERBOSE(hwfn, QED_MSG_DCB, "setting = %d\n", *setting); kfree(dcbx_info); } static void qed_dcbnl_setpfccfg(struct qed_dev *cdev, int priority, u8 setting) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_set dcbx_set; struct qed_ptt *ptt; int rc; DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d setting = %d\n", priority, setting); if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) { DP_INFO(hwfn, "Invalid priority %d\n", priority); return; } memset(&dcbx_set, 0, sizeof(dcbx_set)); rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); if (rc) return; dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG; dcbx_set.config.params.pfc.prio[priority] = !!setting; ptt = qed_ptt_acquire(hwfn); if (!ptt) return; rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); qed_ptt_release(hwfn, ptt); } static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_get *dcbx_info; int rc = 0; DP_VERBOSE(hwfn, QED_MSG_DCB, "capid = %d\n", capid); 
dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); if (!dcbx_info) return 1; switch (capid) { case DCB_CAP_ATTR_PG: case DCB_CAP_ATTR_PFC: case DCB_CAP_ATTR_UP2TC: case DCB_CAP_ATTR_GSP: *cap = true; break; case DCB_CAP_ATTR_PG_TCS: case DCB_CAP_ATTR_PFC_TCS: *cap = 0x80; break; case DCB_CAP_ATTR_DCBX: *cap = (DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_STATIC); break; default: *cap = false; rc = 1; } DP_VERBOSE(hwfn, QED_MSG_DCB, "id = %d caps = %d\n", capid, *cap); kfree(dcbx_info); return rc; } static int qed_dcbnl_getnumtcs(struct qed_dev *cdev, int tcid, u8 *num) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_get *dcbx_info; int rc = 0; DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d\n", tcid); dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); if (!dcbx_info) return -EINVAL; switch (tcid) { case DCB_NUMTCS_ATTR_PG: *num = dcbx_info->operational.params.max_ets_tc; break; case DCB_NUMTCS_ATTR_PFC: *num = dcbx_info->operational.params.pfc.max_tc; break; default: rc = -EINVAL; } kfree(dcbx_info); DP_VERBOSE(hwfn, QED_MSG_DCB, "numtcs = %d\n", *num); return rc; } static u8 qed_dcbnl_getpfcstate(struct qed_dev *cdev) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_get *dcbx_info; bool enabled; dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); if (!dcbx_info) return 0; enabled = dcbx_info->operational.params.pfc.enabled; DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d\n", enabled); kfree(dcbx_info); return enabled; } static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_get *dcbx_info; u8 mode = 0; dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); if (!dcbx_info) return 0; if (dcbx_info->operational.ieee) mode |= DCB_CAP_DCBX_VER_IEEE; if (dcbx_info->operational.cee) mode |= DCB_CAP_DCBX_VER_CEE; if (dcbx_info->operational.local) mode |= DCB_CAP_DCBX_STATIC; DP_VERBOSE(hwfn, QED_MSG_DCB, "dcb mode = %d\n", mode); kfree(dcbx_info); return mode; } static void qed_dcbnl_setpgtccfgtx(struct qed_dev *cdev, int tc, u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_set dcbx_set; struct qed_ptt *ptt; int rc; DP_VERBOSE(hwfn, QED_MSG_DCB, "tc = %d pri_type = %d pgid = %d bw_pct = %d up_map = %d\n", tc, pri_type, pgid, bw_pct, up_map); if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) { DP_INFO(hwfn, "Invalid tc %d\n", tc); return; } memset(&dcbx_set, 0, sizeof(dcbx_set)); rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); if (rc) return; dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG; dcbx_set.config.params.ets_pri_tc_tbl[tc] = pgid; ptt = qed_ptt_acquire(hwfn); if (!ptt) return; rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); qed_ptt_release(hwfn, ptt); } static void qed_dcbnl_setpgtccfgrx(struct qed_dev *cdev, int prio, u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map) { DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n"); } static void qed_dcbnl_setpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 bw_pct) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_set dcbx_set; struct qed_ptt *ptt; int rc; DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d bw_pct = %d\n", pgid, bw_pct); if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) { DP_INFO(hwfn, "Invalid pgid %d\n", pgid); return; } memset(&dcbx_set, 0, sizeof(dcbx_set)); rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); if (rc) return; dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG; 
dcbx_set.config.params.ets_tc_bw_tbl[pgid] = bw_pct; ptt = qed_ptt_acquire(hwfn); if (!ptt) return; rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); qed_ptt_release(hwfn, ptt); } static void qed_dcbnl_setpgbwgcfgrx(struct qed_dev *cdev, int pgid, u8 bw_pct) { DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n"); } static u8 qed_dcbnl_setall(struct qed_dev *cdev) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_set dcbx_set; struct qed_ptt *ptt; int rc; memset(&dcbx_set, 0, sizeof(dcbx_set)); rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); if (rc) return 1; ptt = qed_ptt_acquire(hwfn); if (!ptt) return 1; rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 1); qed_ptt_release(hwfn, ptt); return rc; } static int qed_dcbnl_setnumtcs(struct qed_dev *cdev, int tcid, u8 num) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_set dcbx_set; struct qed_ptt *ptt; int rc; DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d num = %d\n", tcid, num); memset(&dcbx_set, 0, sizeof(dcbx_set)); rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); if (rc) return 1; switch (tcid) { case DCB_NUMTCS_ATTR_PG: dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG; dcbx_set.config.params.max_ets_tc = num; break; case DCB_NUMTCS_ATTR_PFC: dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG; dcbx_set.config.params.pfc.max_tc = num; break; default: DP_INFO(hwfn, "Invalid tcid %d\n", tcid); return -EINVAL; } ptt = qed_ptt_acquire(hwfn); if (!ptt) return -EINVAL; rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); qed_ptt_release(hwfn, ptt); return 0; } static void qed_dcbnl_setpfcstate(struct qed_dev *cdev, u8 state) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_set dcbx_set; struct qed_ptt *ptt; int rc; DP_VERBOSE(hwfn, QED_MSG_DCB, "new state = %d\n", state); memset(&dcbx_set, 0, sizeof(dcbx_set)); rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); if (rc) return; dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG; dcbx_set.config.params.pfc.enabled = !!state; ptt = qed_ptt_acquire(hwfn); if (!ptt) return; rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); qed_ptt_release(hwfn, ptt); } static int qed_dcbnl_getapp(struct qed_dev *cdev, u8 idtype, u16 idval) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_get *dcbx_info; struct qed_app_entry *entry; bool ethtype; u8 prio = 0; int i; dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); if (!dcbx_info) return -EINVAL; ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE); for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) { entry = &dcbx_info->operational.params.app_entry[i]; if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) { prio = entry->prio; break; } } if (i == QED_DCBX_MAX_APP_PROTOCOL) { DP_ERR(cdev, "App entry (%d, %d) not found\n", idtype, idval); kfree(dcbx_info); return -EINVAL; } kfree(dcbx_info); return prio; } static int qed_dcbnl_setapp(struct qed_dev *cdev, u8 idtype, u16 idval, u8 pri_map) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_set dcbx_set; struct qed_app_entry *entry; struct qed_ptt *ptt; bool ethtype; int rc, i; memset(&dcbx_set, 0, sizeof(dcbx_set)); rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); if (rc) return -EINVAL; ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE); for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) { entry = &dcbx_set.config.params.app_entry[i]; if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) break; /* First empty slot */ if (!entry->proto_id) { dcbx_set.config.params.num_app_entries++; break; } } if (i 
== QED_DCBX_MAX_APP_PROTOCOL) { DP_ERR(cdev, "App table is full\n"); return -EBUSY; } dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG; dcbx_set.config.params.app_entry[i].ethtype = ethtype; dcbx_set.config.params.app_entry[i].proto_id = idval; dcbx_set.config.params.app_entry[i].prio = pri_map; ptt = qed_ptt_acquire(hwfn); if (!ptt) return -EBUSY; rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); qed_ptt_release(hwfn, ptt); return rc; } static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_set dcbx_set; struct qed_ptt *ptt; int rc; DP_VERBOSE(hwfn, QED_MSG_DCB, "new mode = %x\n", mode); if (!(mode & DCB_CAP_DCBX_VER_IEEE) && !(mode & DCB_CAP_DCBX_VER_CEE) && !(mode & DCB_CAP_DCBX_STATIC)) { DP_INFO(hwfn, "Allowed modes are cee, ieee or static\n"); return 1; } memset(&dcbx_set, 0, sizeof(dcbx_set)); rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); if (rc) return 1; dcbx_set.ver_num = 0; if (mode & DCB_CAP_DCBX_VER_CEE) { dcbx_set.ver_num |= DCBX_CONFIG_VERSION_CEE; dcbx_set.enabled = true; } if (mode & DCB_CAP_DCBX_VER_IEEE) { dcbx_set.ver_num |= DCBX_CONFIG_VERSION_IEEE; dcbx_set.enabled = true; } if (mode & DCB_CAP_DCBX_STATIC) { dcbx_set.ver_num |= DCBX_CONFIG_VERSION_STATIC; dcbx_set.enabled = true; } ptt = qed_ptt_acquire(hwfn); if (!ptt) return 1; rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); qed_ptt_release(hwfn, ptt); return rc; } static u8 qed_dcbnl_getfeatcfg(struct qed_dev *cdev, int featid, u8 *flags) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_get *dcbx_info; DP_VERBOSE(hwfn, QED_MSG_DCB, "Feature id = %d\n", featid); dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); if (!dcbx_info) return 1; *flags = 0; switch (featid) { case DCB_FEATCFG_ATTR_PG: if (dcbx_info->operational.params.ets_enabled) *flags = DCB_FEATCFG_ENABLE; else *flags = DCB_FEATCFG_ERROR; break; case DCB_FEATCFG_ATTR_PFC: if (dcbx_info->operational.params.pfc.enabled) *flags = DCB_FEATCFG_ENABLE; else *flags = DCB_FEATCFG_ERROR; break; case DCB_FEATCFG_ATTR_APP: if (dcbx_info->operational.params.app_valid) *flags = DCB_FEATCFG_ENABLE; else *flags = DCB_FEATCFG_ERROR; break; default: DP_INFO(hwfn, "Invalid feature-ID %d\n", featid); kfree(dcbx_info); return 1; } DP_VERBOSE(hwfn, QED_MSG_DCB, "flags = %d\n", *flags); kfree(dcbx_info); return 0; } static u8 qed_dcbnl_setfeatcfg(struct qed_dev *cdev, int featid, u8 flags) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_set dcbx_set; bool enabled, willing; struct qed_ptt *ptt; int rc; DP_VERBOSE(hwfn, QED_MSG_DCB, "featid = %d flags = %d\n", featid, flags); memset(&dcbx_set, 0, sizeof(dcbx_set)); rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); if (rc) return 1; enabled = !!(flags & DCB_FEATCFG_ENABLE); willing = !!(flags & DCB_FEATCFG_WILLING); switch (featid) { case DCB_FEATCFG_ATTR_PG: dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG; dcbx_set.config.params.ets_enabled = enabled; dcbx_set.config.params.ets_willing = willing; break; case DCB_FEATCFG_ATTR_PFC: dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG; dcbx_set.config.params.pfc.enabled = enabled; dcbx_set.config.params.pfc.willing = willing; break; case DCB_FEATCFG_ATTR_APP: dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG; dcbx_set.config.params.app_willing = willing; break; default: DP_INFO(hwfn, "Invalid feature-ID %d\n", featid); return 1; } ptt = qed_ptt_acquire(hwfn); if (!ptt) return 1; rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); 
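	/* The last argument (hw_commit) is 0 here, as in the other set
	 * callbacks in this file; only qed_dcbnl_setall() passes 1, which is
	 * the CEE-style commit of the accumulated configuration.
	 */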
qed_ptt_release(hwfn, ptt); return 0; } static int qed_dcbnl_peer_getappinfo(struct qed_dev *cdev, struct dcb_peer_app_info *info, u16 *app_count) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_get *dcbx_info; dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB); if (!dcbx_info) return -EINVAL; info->willing = dcbx_info->remote.params.app_willing; info->error = dcbx_info->remote.params.app_error; *app_count = dcbx_info->remote.params.num_app_entries; kfree(dcbx_info); return 0; } static int qed_dcbnl_peer_getapptable(struct qed_dev *cdev, struct dcb_app *table) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_get *dcbx_info; int i; dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB); if (!dcbx_info) return -EINVAL; for (i = 0; i < dcbx_info->remote.params.num_app_entries; i++) { if (dcbx_info->remote.params.app_entry[i].ethtype) table[i].selector = DCB_APP_IDTYPE_ETHTYPE; else table[i].selector = DCB_APP_IDTYPE_PORTNUM; table[i].priority = dcbx_info->remote.params.app_entry[i].prio; table[i].protocol = dcbx_info->remote.params.app_entry[i].proto_id; } kfree(dcbx_info); return 0; } static int qed_dcbnl_cee_peer_getpfc(struct qed_dev *cdev, struct cee_pfc *pfc) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_get *dcbx_info; int i; dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB); if (!dcbx_info) return -EINVAL; for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) if (dcbx_info->remote.params.pfc.prio[i]) pfc->pfc_en |= BIT(i); pfc->tcs_supported = dcbx_info->remote.params.pfc.max_tc; DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d tcs_supported = %d\n", pfc->pfc_en, pfc->tcs_supported); kfree(dcbx_info); return 0; } static int qed_dcbnl_cee_peer_getpg(struct qed_dev *cdev, struct cee_pg *pg) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_get *dcbx_info; int i; dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB); if (!dcbx_info) return -EINVAL; pg->willing = dcbx_info->remote.params.ets_willing; for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { pg->pg_bw[i] = dcbx_info->remote.params.ets_tc_bw_tbl[i]; pg->prio_pg[i] = dcbx_info->remote.params.ets_pri_tc_tbl[i]; } DP_VERBOSE(hwfn, QED_MSG_DCB, "willing = %d", pg->willing); kfree(dcbx_info); return 0; } static int qed_dcbnl_get_ieee_pfc(struct qed_dev *cdev, struct ieee_pfc *pfc, bool remote) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_params *params; struct qed_dcbx_get *dcbx_info; int rc, i; dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); if (!dcbx_info) return -EINVAL; if (!dcbx_info->operational.ieee) { DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); kfree(dcbx_info); return -EINVAL; } if (remote) { memset(dcbx_info, 0, sizeof(*dcbx_info)); rc = qed_dcbx_query_params(hwfn, dcbx_info, QED_DCBX_REMOTE_MIB); if (rc) { kfree(dcbx_info); return -EINVAL; } params = &dcbx_info->remote.params; } else { params = &dcbx_info->operational.params; } pfc->pfc_cap = params->pfc.max_tc; pfc->pfc_en = 0; for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) if (params->pfc.prio[i]) pfc->pfc_en |= BIT(i); kfree(dcbx_info); return 0; } static int qed_dcbnl_ieee_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc) { return qed_dcbnl_get_ieee_pfc(cdev, pfc, false); } static int qed_dcbnl_ieee_setpfc(struct qed_dev *cdev, struct ieee_pfc *pfc) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_get *dcbx_info; struct qed_dcbx_set dcbx_set; struct qed_ptt *ptt; int rc, i; dcbx_info = qed_dcbnl_get_dcbx(hwfn, 
QED_DCBX_OPERATIONAL_MIB); if (!dcbx_info) return -EINVAL; if (!dcbx_info->operational.ieee) { DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); kfree(dcbx_info); return -EINVAL; } kfree(dcbx_info); memset(&dcbx_set, 0, sizeof(dcbx_set)); rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); if (rc) return -EINVAL; dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG; for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i)); dcbx_set.config.params.pfc.max_tc = pfc->pfc_cap; ptt = qed_ptt_acquire(hwfn); if (!ptt) return -EINVAL; rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); qed_ptt_release(hwfn, ptt); return rc; } static int qed_dcbnl_get_ieee_ets(struct qed_dev *cdev, struct ieee_ets *ets, bool remote) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_get *dcbx_info; struct qed_dcbx_params *params; int rc; dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); if (!dcbx_info) return -EINVAL; if (!dcbx_info->operational.ieee) { DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); kfree(dcbx_info); return -EINVAL; } if (remote) { memset(dcbx_info, 0, sizeof(*dcbx_info)); rc = qed_dcbx_query_params(hwfn, dcbx_info, QED_DCBX_REMOTE_MIB); if (rc) { kfree(dcbx_info); return -EINVAL; } params = &dcbx_info->remote.params; } else { params = &dcbx_info->operational.params; } ets->ets_cap = params->max_ets_tc; ets->willing = params->ets_willing; ets->cbs = params->ets_cbs; memcpy(ets->tc_tx_bw, params->ets_tc_bw_tbl, sizeof(ets->tc_tx_bw)); memcpy(ets->tc_tsa, params->ets_tc_tsa_tbl, sizeof(ets->tc_tsa)); memcpy(ets->prio_tc, params->ets_pri_tc_tbl, sizeof(ets->prio_tc)); kfree(dcbx_info); return 0; } static int qed_dcbnl_ieee_getets(struct qed_dev *cdev, struct ieee_ets *ets) { return qed_dcbnl_get_ieee_ets(cdev, ets, false); } static int qed_dcbnl_ieee_setets(struct qed_dev *cdev, struct ieee_ets *ets) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_get *dcbx_info; struct qed_dcbx_set dcbx_set; struct qed_ptt *ptt; int rc; dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); if (!dcbx_info) return -EINVAL; if (!dcbx_info->operational.ieee) { DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); kfree(dcbx_info); return -EINVAL; } kfree(dcbx_info); memset(&dcbx_set, 0, sizeof(dcbx_set)); rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); if (rc) return -EINVAL; dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG; dcbx_set.config.params.max_ets_tc = ets->ets_cap; dcbx_set.config.params.ets_willing = ets->willing; dcbx_set.config.params.ets_cbs = ets->cbs; memcpy(dcbx_set.config.params.ets_tc_bw_tbl, ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); memcpy(dcbx_set.config.params.ets_tc_tsa_tbl, ets->tc_tsa, sizeof(ets->tc_tsa)); memcpy(dcbx_set.config.params.ets_pri_tc_tbl, ets->prio_tc, sizeof(ets->prio_tc)); ptt = qed_ptt_acquire(hwfn); if (!ptt) return -EINVAL; rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); qed_ptt_release(hwfn, ptt); return rc; } static int qed_dcbnl_ieee_peer_getets(struct qed_dev *cdev, struct ieee_ets *ets) { return qed_dcbnl_get_ieee_ets(cdev, ets, true); } static int qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc) { return qed_dcbnl_get_ieee_pfc(cdev, pfc, true); } static int qed_get_sf_ieee_value(u8 selector, u8 *sf_ieee) { switch (selector) { case IEEE_8021QAZ_APP_SEL_ETHERTYPE: *sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE; break; case IEEE_8021QAZ_APP_SEL_STREAM: *sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT; break; case 
IEEE_8021QAZ_APP_SEL_DGRAM: *sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT; break; case IEEE_8021QAZ_APP_SEL_ANY: *sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT; break; default: return -EINVAL; } return 0; } static int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_get *dcbx_info; struct qed_app_entry *entry; u8 prio = 0; u8 sf_ieee; int i; DP_VERBOSE(hwfn, QED_MSG_DCB, "selector = %d protocol = %d\n", app->selector, app->protocol); if (qed_get_sf_ieee_value(app->selector, &sf_ieee)) { DP_INFO(cdev, "Invalid selector field value %d\n", app->selector); return -EINVAL; } dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); if (!dcbx_info) return -EINVAL; if (!dcbx_info->operational.ieee) { DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); kfree(dcbx_info); return -EINVAL; } for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) { entry = &dcbx_info->operational.params.app_entry[i]; if ((entry->sf_ieee == sf_ieee) && (entry->proto_id == app->protocol)) { prio = entry->prio; break; } } if (i == QED_DCBX_MAX_APP_PROTOCOL) { DP_ERR(cdev, "App entry (%d, %d) not found\n", app->selector, app->protocol); kfree(dcbx_info); return -EINVAL; } app->priority = ffs(prio) - 1; kfree(dcbx_info); return 0; } static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_dcbx_get *dcbx_info; struct qed_dcbx_set dcbx_set; struct qed_app_entry *entry; struct qed_ptt *ptt; u8 sf_ieee; int rc, i; DP_VERBOSE(hwfn, QED_MSG_DCB, "selector = %d protocol = %d pri = %d\n", app->selector, app->protocol, app->priority); if (app->priority >= QED_MAX_PFC_PRIORITIES) { DP_INFO(hwfn, "Invalid priority %d\n", app->priority); return -EINVAL; } if (qed_get_sf_ieee_value(app->selector, &sf_ieee)) { DP_INFO(cdev, "Invalid selector field value %d\n", app->selector); return -EINVAL; } dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); if (!dcbx_info) return -EINVAL; if (!dcbx_info->operational.ieee) { DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); kfree(dcbx_info); return -EINVAL; } kfree(dcbx_info); memset(&dcbx_set, 0, sizeof(dcbx_set)); rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); if (rc) return -EINVAL; for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) { entry = &dcbx_set.config.params.app_entry[i]; if ((entry->sf_ieee == sf_ieee) && (entry->proto_id == app->protocol)) break; /* First empty slot */ if (!entry->proto_id) { dcbx_set.config.params.num_app_entries++; break; } } if (i == QED_DCBX_MAX_APP_PROTOCOL) { DP_ERR(cdev, "App table is full\n"); return -EBUSY; } dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG; dcbx_set.config.params.app_entry[i].sf_ieee = sf_ieee; dcbx_set.config.params.app_entry[i].proto_id = app->protocol; dcbx_set.config.params.app_entry[i].prio = BIT(app->priority); ptt = qed_ptt_acquire(hwfn); if (!ptt) return -EBUSY; rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); qed_ptt_release(hwfn, ptt); return rc; } const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass = { .getstate = qed_dcbnl_getstate, .setstate = qed_dcbnl_setstate, .getpgtccfgtx = qed_dcbnl_getpgtccfgtx, .getpgbwgcfgtx = qed_dcbnl_getpgbwgcfgtx, .getpgtccfgrx = qed_dcbnl_getpgtccfgrx, .getpgbwgcfgrx = qed_dcbnl_getpgbwgcfgrx, .getpfccfg = qed_dcbnl_getpfccfg, .setpfccfg = qed_dcbnl_setpfccfg, .getcap = qed_dcbnl_getcap, .getnumtcs = qed_dcbnl_getnumtcs, .getpfcstate = qed_dcbnl_getpfcstate, .getdcbx = qed_dcbnl_getdcbx, .setpgtccfgtx = 
qed_dcbnl_setpgtccfgtx, .setpgtccfgrx = qed_dcbnl_setpgtccfgrx, .setpgbwgcfgtx = qed_dcbnl_setpgbwgcfgtx, .setpgbwgcfgrx = qed_dcbnl_setpgbwgcfgrx, .setall = qed_dcbnl_setall, .setnumtcs = qed_dcbnl_setnumtcs, .setpfcstate = qed_dcbnl_setpfcstate, .setapp = qed_dcbnl_setapp, .setdcbx = qed_dcbnl_setdcbx, .setfeatcfg = qed_dcbnl_setfeatcfg, .getfeatcfg = qed_dcbnl_getfeatcfg, .getapp = qed_dcbnl_getapp, .peer_getappinfo = qed_dcbnl_peer_getappinfo, .peer_getapptable = qed_dcbnl_peer_getapptable, .cee_peer_getpfc = qed_dcbnl_cee_peer_getpfc, .cee_peer_getpg = qed_dcbnl_cee_peer_getpg, .ieee_getpfc = qed_dcbnl_ieee_getpfc, .ieee_setpfc = qed_dcbnl_ieee_setpfc, .ieee_getets = qed_dcbnl_ieee_getets, .ieee_setets = qed_dcbnl_ieee_setets, .ieee_peer_getpfc = qed_dcbnl_ieee_peer_getpfc, .ieee_peer_getets = qed_dcbnl_ieee_peer_getets, .ieee_getapp = qed_dcbnl_ieee_getapp, .ieee_setapp = qed_dcbnl_ieee_setapp, }; #endif
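/* Usage sketch (illustrative only, not part of the driver): a hypothetical
 * upper-layer consumer holding this ops table would stage CEE changes through
 * the individual setters and then commit them with ->setall(), mirroring the
 * hw_commit = 0 / hw_commit = 1 split above. Exact callback signatures are
 * those of struct qed_eth_dcbnl_ops.
 *
 *	const struct qed_eth_dcbnl_ops *dcb = &qed_dcbnl_ops_pass;
 *
 *	dcb->setpfccfg(cdev, prio, 1);           stage: enable PFC on 'prio'
 *	dcb->setapp(cdev, idtype, idval, prio);  stage: map an app entry
 *	if (dcb->setall(cdev))                   commit staged configuration;
 *		;                                non-zero indicates failure
 */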
linux-master
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_dev_api.h"
#include "qed_fcoe.h"
#include "qed_hsi.h"
#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
#include "qed_rdma.h"
#include "qed_nvmetcp.h"

static DEFINE_SPINLOCK(qm_lock);

/******************** Doorbell Recovery *******************/
/* The doorbell recovery mechanism consists of a list of entries which represent
 * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each
 * entity needs to register with the mechanism and provide the parameters
 * describing its doorbell, including a location where last used doorbell data
 * can be found. The doorbell execute function will traverse the list and
 * doorbell all of the registered entries.
 */
struct qed_db_recovery_entry {
	struct list_head list_entry;
	void __iomem *db_addr;
	void *db_data;
	enum qed_db_rec_width db_width;
	enum qed_db_rec_space db_space;
	u8 hwfn_idx;
};

/* Display a single doorbell recovery entry */
static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn,
				     struct qed_db_recovery_entry *db_entry,
				     char *action)
{
	DP_VERBOSE(p_hwfn,
		   QED_MSG_SPQ,
		   "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n",
		   action,
		   db_entry,
		   db_entry->db_addr,
		   db_entry->db_data,
		   db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b",
		   db_entry->db_space == DB_REC_USER ? "user" : "kernel",
		   db_entry->hwfn_idx);
}

/* Doorbell address sanity (address within doorbell bar range) */
static bool qed_db_rec_sanity(struct qed_dev *cdev,
			      void __iomem *db_addr,
			      enum qed_db_rec_width db_width,
			      void *db_data)
{
	u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64;

	/* Make sure doorbell address is within the doorbell bar */
	if (db_addr < cdev->doorbells ||
	    (u8 __iomem *)db_addr + width >
	    (u8 __iomem *)cdev->doorbells + cdev->db_size) {
		WARN(true,
		     "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
		     db_addr,
		     cdev->doorbells,
		     (u8 __iomem *)cdev->doorbells + cdev->db_size);
		return false;
	}

	/* Make sure doorbell data pointer is not null */
	if (!db_data) {
		WARN(true, "Illegal doorbell data pointer: %p", db_data);
		return false;
	}

	return true;
}

/* Find hwfn according to the doorbell address */
static struct qed_hwfn *qed_db_rec_find_hwfn(struct qed_dev *cdev,
					     void __iomem *db_addr)
{
	struct qed_hwfn *p_hwfn;

	/* In CMT the doorbell bar is split down the middle between engine 0
	 * and engine 1.
	 */
	if (cdev->num_hwfns > 1)
		p_hwfn = db_addr < cdev->hwfns[1].doorbells ?
&cdev->hwfns[0] : &cdev->hwfns[1]; else p_hwfn = QED_LEADING_HWFN(cdev); return p_hwfn; } /* Add a new entry to the doorbell recovery mechanism */ int qed_db_recovery_add(struct qed_dev *cdev, void __iomem *db_addr, void *db_data, enum qed_db_rec_width db_width, enum qed_db_rec_space db_space) { struct qed_db_recovery_entry *db_entry; struct qed_hwfn *p_hwfn; /* Shortcircuit VFs, for now */ if (IS_VF(cdev)) { DP_VERBOSE(cdev, QED_MSG_IOV, "db recovery - skipping VF doorbell\n"); return 0; } /* Sanitize doorbell address */ if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data)) return -EINVAL; /* Obtain hwfn from doorbell address */ p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr); /* Create entry */ db_entry = kzalloc(sizeof(*db_entry), GFP_KERNEL); if (!db_entry) { DP_NOTICE(cdev, "Failed to allocate a db recovery entry\n"); return -ENOMEM; } /* Populate entry */ db_entry->db_addr = db_addr; db_entry->db_data = db_data; db_entry->db_width = db_width; db_entry->db_space = db_space; db_entry->hwfn_idx = p_hwfn->my_id; /* Display */ qed_db_recovery_dp_entry(p_hwfn, db_entry, "Adding"); /* Protect the list */ spin_lock_bh(&p_hwfn->db_recovery_info.lock); list_add_tail(&db_entry->list_entry, &p_hwfn->db_recovery_info.list); spin_unlock_bh(&p_hwfn->db_recovery_info.lock); return 0; } /* Remove an entry from the doorbell recovery mechanism */ int qed_db_recovery_del(struct qed_dev *cdev, void __iomem *db_addr, void *db_data) { struct qed_db_recovery_entry *db_entry = NULL; struct qed_hwfn *p_hwfn; int rc = -EINVAL; /* Shortcircuit VFs, for now */ if (IS_VF(cdev)) { DP_VERBOSE(cdev, QED_MSG_IOV, "db recovery - skipping VF doorbell\n"); return 0; } /* Obtain hwfn from doorbell address */ p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr); /* Protect the list */ spin_lock_bh(&p_hwfn->db_recovery_info.lock); list_for_each_entry(db_entry, &p_hwfn->db_recovery_info.list, list_entry) { /* search according to db_data addr since db_addr is not unique (roce) */ if (db_entry->db_data == db_data) { qed_db_recovery_dp_entry(p_hwfn, db_entry, "Deleting"); list_del(&db_entry->list_entry); rc = 0; break; } } spin_unlock_bh(&p_hwfn->db_recovery_info.lock); if (rc == -EINVAL) DP_NOTICE(p_hwfn, "Failed to find element in list. Key (db_data addr) was %p. db_addr was %p\n", db_data, db_addr); else kfree(db_entry); return rc; } /* Initialize the doorbell recovery mechanism */ static int qed_db_recovery_setup(struct qed_hwfn *p_hwfn) { DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Setting up db recovery\n"); /* Make sure db_size was set in cdev */ if (!p_hwfn->cdev->db_size) { DP_ERR(p_hwfn->cdev, "db_size not set\n"); return -EINVAL; } INIT_LIST_HEAD(&p_hwfn->db_recovery_info.list); spin_lock_init(&p_hwfn->db_recovery_info.lock); p_hwfn->db_recovery_info.db_recovery_counter = 0; return 0; } /* Destroy the doorbell recovery mechanism */ static void qed_db_recovery_teardown(struct qed_hwfn *p_hwfn) { struct qed_db_recovery_entry *db_entry = NULL; DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Tearing down db recovery\n"); if (!list_empty(&p_hwfn->db_recovery_info.list)) { DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). 
Prepare to purge doorbell recovery list...\n"); while (!list_empty(&p_hwfn->db_recovery_info.list)) { db_entry = list_first_entry(&p_hwfn->db_recovery_info.list, struct qed_db_recovery_entry, list_entry); qed_db_recovery_dp_entry(p_hwfn, db_entry, "Purging"); list_del(&db_entry->list_entry); kfree(db_entry); } } p_hwfn->db_recovery_info.db_recovery_counter = 0; } /* Print the content of the doorbell recovery mechanism */ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn) { struct qed_db_recovery_entry *db_entry = NULL; DP_NOTICE(p_hwfn, "Displaying doorbell recovery database. Counter was %d\n", p_hwfn->db_recovery_info.db_recovery_counter); /* Protect the list */ spin_lock_bh(&p_hwfn->db_recovery_info.lock); list_for_each_entry(db_entry, &p_hwfn->db_recovery_info.list, list_entry) { qed_db_recovery_dp_entry(p_hwfn, db_entry, "Printing"); } spin_unlock_bh(&p_hwfn->db_recovery_info.lock); } /* Ring the doorbell of a single doorbell recovery entry */ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn, struct qed_db_recovery_entry *db_entry) { /* Print according to width */ if (db_entry->db_width == DB_REC_WIDTH_32B) { DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "ringing doorbell address %p data %x\n", db_entry->db_addr, *(u32 *)db_entry->db_data); } else { DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "ringing doorbell address %p data %llx\n", db_entry->db_addr, *(u64 *)(db_entry->db_data)); } /* Sanity */ if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr, db_entry->db_width, db_entry->db_data)) return; /* Flush the write combined buffer. Since there are multiple doorbelling * entities using the same address, if we don't flush, a transaction * could be lost. */ wmb(); /* Ring the doorbell */ if (db_entry->db_width == DB_REC_WIDTH_32B) DIRECT_REG_WR(db_entry->db_addr, *(u32 *)(db_entry->db_data)); else DIRECT_REG_WR64(db_entry->db_addr, *(u64 *)(db_entry->db_data)); /* Flush the write combined buffer. Next doorbell may come from a * different entity to the same address... */ wmb(); } /* Traverse the doorbell recovery entry list and ring all the doorbells */ void qed_db_recovery_execute(struct qed_hwfn *p_hwfn) { struct qed_db_recovery_entry *db_entry = NULL; DP_NOTICE(p_hwfn, "Executing doorbell recovery. Counter was %d\n", p_hwfn->db_recovery_info.db_recovery_counter); /* Track amount of times recovery was executed */ p_hwfn->db_recovery_info.db_recovery_counter++; /* Protect the list */ spin_lock_bh(&p_hwfn->db_recovery_info.lock); list_for_each_entry(db_entry, &p_hwfn->db_recovery_info.list, list_entry) qed_db_recovery_ring(p_hwfn, db_entry); spin_unlock_bh(&p_hwfn->db_recovery_info.lock); } /******************** Doorbell Recovery end ****************/ /********************************** NIG LLH ***********************************/ enum qed_llh_filter_type { QED_LLH_FILTER_TYPE_MAC, QED_LLH_FILTER_TYPE_PROTOCOL, }; struct qed_llh_mac_filter { u8 addr[ETH_ALEN]; }; struct qed_llh_protocol_filter { enum qed_llh_prot_filter_type_t type; u16 source_port_or_eth_type; u16 dest_port; }; union qed_llh_filter { struct qed_llh_mac_filter mac; struct qed_llh_protocol_filter protocol; }; struct qed_llh_filter_info { bool b_enabled; u32 ref_cnt; enum qed_llh_filter_type type; union qed_llh_filter filter; }; struct qed_llh_info { /* Number of LLH filters banks */ u8 num_ppfid; #define MAX_NUM_PPFID 8 u8 ppfid_array[MAX_NUM_PPFID]; /* Array of filters arrays: * "num_ppfid" elements of filters banks, where each is an array of * "NIG_REG_LLH_FUNC_FILTER_EN_SIZE" filters. 
*/ struct qed_llh_filter_info **pp_filters; }; static void qed_llh_free(struct qed_dev *cdev) { struct qed_llh_info *p_llh_info = cdev->p_llh_info; u32 i; if (p_llh_info) { if (p_llh_info->pp_filters) for (i = 0; i < p_llh_info->num_ppfid; i++) kfree(p_llh_info->pp_filters[i]); kfree(p_llh_info->pp_filters); } kfree(p_llh_info); cdev->p_llh_info = NULL; } static int qed_llh_alloc(struct qed_dev *cdev) { struct qed_llh_info *p_llh_info; u32 size, i; p_llh_info = kzalloc(sizeof(*p_llh_info), GFP_KERNEL); if (!p_llh_info) return -ENOMEM; cdev->p_llh_info = p_llh_info; for (i = 0; i < MAX_NUM_PPFID; i++) { if (!(cdev->ppfid_bitmap & (0x1 << i))) continue; p_llh_info->ppfid_array[p_llh_info->num_ppfid] = i; DP_VERBOSE(cdev, QED_MSG_SP, "ppfid_array[%d] = %u\n", p_llh_info->num_ppfid, i); p_llh_info->num_ppfid++; } size = p_llh_info->num_ppfid * sizeof(*p_llh_info->pp_filters); p_llh_info->pp_filters = kzalloc(size, GFP_KERNEL); if (!p_llh_info->pp_filters) return -ENOMEM; size = NIG_REG_LLH_FUNC_FILTER_EN_SIZE * sizeof(**p_llh_info->pp_filters); for (i = 0; i < p_llh_info->num_ppfid; i++) { p_llh_info->pp_filters[i] = kzalloc(size, GFP_KERNEL); if (!p_llh_info->pp_filters[i]) return -ENOMEM; } return 0; } static int qed_llh_shadow_sanity(struct qed_dev *cdev, u8 ppfid, u8 filter_idx, const char *action) { struct qed_llh_info *p_llh_info = cdev->p_llh_info; if (ppfid >= p_llh_info->num_ppfid) { DP_NOTICE(cdev, "LLH shadow [%s]: using ppfid %d while only %d ppfids are available\n", action, ppfid, p_llh_info->num_ppfid); return -EINVAL; } if (filter_idx >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) { DP_NOTICE(cdev, "LLH shadow [%s]: using filter_idx %d while only %d filters are available\n", action, filter_idx, NIG_REG_LLH_FUNC_FILTER_EN_SIZE); return -EINVAL; } return 0; } #define QED_LLH_INVALID_FILTER_IDX 0xff static int qed_llh_shadow_search_filter(struct qed_dev *cdev, u8 ppfid, union qed_llh_filter *p_filter, u8 *p_filter_idx) { struct qed_llh_info *p_llh_info = cdev->p_llh_info; struct qed_llh_filter_info *p_filters; int rc; u8 i; rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "search"); if (rc) return rc; *p_filter_idx = QED_LLH_INVALID_FILTER_IDX; p_filters = p_llh_info->pp_filters[ppfid]; for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { if (!memcmp(p_filter, &p_filters[i].filter, sizeof(*p_filter))) { *p_filter_idx = i; break; } } return 0; } static int qed_llh_shadow_get_free_idx(struct qed_dev *cdev, u8 ppfid, u8 *p_filter_idx) { struct qed_llh_info *p_llh_info = cdev->p_llh_info; struct qed_llh_filter_info *p_filters; int rc; u8 i; rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "get_free_idx"); if (rc) return rc; *p_filter_idx = QED_LLH_INVALID_FILTER_IDX; p_filters = p_llh_info->pp_filters[ppfid]; for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { if (!p_filters[i].b_enabled) { *p_filter_idx = i; break; } } return 0; } static int __qed_llh_shadow_add_filter(struct qed_dev *cdev, u8 ppfid, u8 filter_idx, enum qed_llh_filter_type type, union qed_llh_filter *p_filter, u32 *p_ref_cnt) { struct qed_llh_info *p_llh_info = cdev->p_llh_info; struct qed_llh_filter_info *p_filters; int rc; rc = qed_llh_shadow_sanity(cdev, ppfid, filter_idx, "add"); if (rc) return rc; p_filters = p_llh_info->pp_filters[ppfid]; if (!p_filters[filter_idx].ref_cnt) { p_filters[filter_idx].b_enabled = true; p_filters[filter_idx].type = type; memcpy(&p_filters[filter_idx].filter, p_filter, sizeof(p_filters[filter_idx].filter)); } *p_ref_cnt = ++p_filters[filter_idx].ref_cnt; return 0; } static int 
qed_llh_shadow_add_filter(struct qed_dev *cdev, u8 ppfid, enum qed_llh_filter_type type, union qed_llh_filter *p_filter, u8 *p_filter_idx, u32 *p_ref_cnt) { int rc; /* Check if the same filter already exist */ rc = qed_llh_shadow_search_filter(cdev, ppfid, p_filter, p_filter_idx); if (rc) return rc; /* Find a new entry in case of a new filter */ if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) { rc = qed_llh_shadow_get_free_idx(cdev, ppfid, p_filter_idx); if (rc) return rc; } /* No free entry was found */ if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) { DP_NOTICE(cdev, "Failed to find an empty LLH filter to utilize [ppfid %d]\n", ppfid); return -EINVAL; } return __qed_llh_shadow_add_filter(cdev, ppfid, *p_filter_idx, type, p_filter, p_ref_cnt); } static int __qed_llh_shadow_remove_filter(struct qed_dev *cdev, u8 ppfid, u8 filter_idx, u32 *p_ref_cnt) { struct qed_llh_info *p_llh_info = cdev->p_llh_info; struct qed_llh_filter_info *p_filters; int rc; rc = qed_llh_shadow_sanity(cdev, ppfid, filter_idx, "remove"); if (rc) return rc; p_filters = p_llh_info->pp_filters[ppfid]; if (!p_filters[filter_idx].ref_cnt) { DP_NOTICE(cdev, "LLH shadow: trying to remove a filter with ref_cnt=0\n"); return -EINVAL; } *p_ref_cnt = --p_filters[filter_idx].ref_cnt; if (!p_filters[filter_idx].ref_cnt) memset(&p_filters[filter_idx], 0, sizeof(p_filters[filter_idx])); return 0; } static int qed_llh_shadow_remove_filter(struct qed_dev *cdev, u8 ppfid, union qed_llh_filter *p_filter, u8 *p_filter_idx, u32 *p_ref_cnt) { int rc; rc = qed_llh_shadow_search_filter(cdev, ppfid, p_filter, p_filter_idx); if (rc) return rc; /* No matching filter was found */ if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) { DP_NOTICE(cdev, "Failed to find a filter in the LLH shadow\n"); return -EINVAL; } return __qed_llh_shadow_remove_filter(cdev, ppfid, *p_filter_idx, p_ref_cnt); } static int qed_llh_abs_ppfid(struct qed_dev *cdev, u8 ppfid, u8 *p_abs_ppfid) { struct qed_llh_info *p_llh_info = cdev->p_llh_info; if (ppfid >= p_llh_info->num_ppfid) { DP_NOTICE(cdev, "ppfid %d is not valid, available indices are 0..%d\n", ppfid, p_llh_info->num_ppfid - 1); *p_abs_ppfid = 0; return -EINVAL; } *p_abs_ppfid = p_llh_info->ppfid_array[ppfid]; return 0; } static int qed_llh_set_engine_affin(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_dev *cdev = p_hwfn->cdev; enum qed_eng eng; u8 ppfid; int rc; rc = qed_mcp_get_engine_config(p_hwfn, p_ptt); if (rc != 0 && rc != -EOPNOTSUPP) { DP_NOTICE(p_hwfn, "Failed to get the engine affinity configuration\n"); return rc; } /* RoCE PF is bound to a single engine */ if (QED_IS_ROCE_PERSONALITY(p_hwfn)) { eng = cdev->fir_affin ? QED_ENG1 : QED_ENG0; rc = qed_llh_set_roce_affinity(cdev, eng); if (rc) { DP_NOTICE(cdev, "Failed to set the RoCE engine affinity\n"); return rc; } DP_VERBOSE(cdev, QED_MSG_SP, "LLH: Set the engine affinity of RoCE packets as %d\n", eng); } /* Storage PF is bound to a single engine while L2 PF uses both */ if (QED_IS_FCOE_PERSONALITY(p_hwfn) || QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn)) eng = cdev->fir_affin ? 
QED_ENG1 : QED_ENG0; else /* L2_PERSONALITY */ eng = QED_BOTH_ENG; for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) { rc = qed_llh_set_ppfid_affinity(cdev, ppfid, eng); if (rc) { DP_NOTICE(cdev, "Failed to set the engine affinity of ppfid %d\n", ppfid); return rc; } } DP_VERBOSE(cdev, QED_MSG_SP, "LLH: Set the engine affinity of non-RoCE packets as %d\n", eng); return 0; } static int qed_llh_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_dev *cdev = p_hwfn->cdev; u8 ppfid, abs_ppfid; int rc; for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) { u32 addr; rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid); if (rc) return rc; addr = NIG_REG_LLH_PPFID2PFID_TBL_0 + abs_ppfid * 0x4; qed_wr(p_hwfn, p_ptt, addr, p_hwfn->rel_pf_id); } if (test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits) && !QED_IS_FCOE_PERSONALITY(p_hwfn)) { rc = qed_llh_add_mac_filter(cdev, 0, p_hwfn->hw_info.hw_mac_addr); if (rc) DP_NOTICE(cdev, "Failed to add an LLH filter with the primary MAC\n"); } if (QED_IS_CMT(cdev)) { rc = qed_llh_set_engine_affin(p_hwfn, p_ptt); if (rc) return rc; } return 0; } u8 qed_llh_get_num_ppfid(struct qed_dev *cdev) { return cdev->p_llh_info->num_ppfid; } #define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_MASK 0x3 #define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_SHIFT 0 #define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_MASK 0x3 #define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_SHIFT 2 int qed_llh_set_ppfid_affinity(struct qed_dev *cdev, u8 ppfid, enum qed_eng eng) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); u32 addr, val, eng_sel; u8 abs_ppfid; int rc = 0; if (!p_ptt) return -EAGAIN; if (!QED_IS_CMT(cdev)) goto out; rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid); if (rc) goto out; switch (eng) { case QED_ENG0: eng_sel = 0; break; case QED_ENG1: eng_sel = 1; break; case QED_BOTH_ENG: eng_sel = 2; break; default: DP_NOTICE(cdev, "Invalid affinity value for ppfid [%d]\n", eng); rc = -EINVAL; goto out; } addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4; val = qed_rd(p_hwfn, p_ptt, addr); SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE, eng_sel); qed_wr(p_hwfn, p_ptt, addr, val); /* The iWARP affinity is set as the affinity of ppfid 0 */ if (!ppfid && QED_IS_IWARP_PERSONALITY(p_hwfn)) cdev->iwarp_affin = (eng == QED_ENG1) ? 
1 : 0; out: qed_ptt_release(p_hwfn, p_ptt); return rc; } int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); u32 addr, val, eng_sel; u8 ppfid, abs_ppfid; int rc = 0; if (!p_ptt) return -EAGAIN; if (!QED_IS_CMT(cdev)) goto out; switch (eng) { case QED_ENG0: eng_sel = 0; break; case QED_ENG1: eng_sel = 1; break; case QED_BOTH_ENG: eng_sel = 2; qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL, 0xf); /* QP bit 15 */ break; default: DP_NOTICE(cdev, "Invalid affinity value for RoCE [%d]\n", eng); rc = -EINVAL; goto out; } for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) { rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid); if (rc) goto out; addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4; val = qed_rd(p_hwfn, p_ptt, addr); SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_ROCE, eng_sel); qed_wr(p_hwfn, p_ptt, addr, val); } out: qed_ptt_release(p_hwfn, p_ptt); return rc; } struct qed_llh_filter_details { u64 value; u32 mode; u32 protocol_type; u32 hdr_sel; u32 enable; }; static int qed_llh_access_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx, struct qed_llh_filter_details *p_details) { struct qed_dmae_params params = {0}; u32 addr; u8 pfid; int rc; /* The NIG/LLH registers that are accessed in this function have only 16 * rows which are exposed to a PF. I.e. only the 16 filters of its * default ppfid. Accessing filters of other ppfids requires pretending * to another PFs. * The calculation of PPFID->PFID in AH is based on the relative index * of a PF on its port. * For BB the pfid is actually the abs_ppfid. */ if (QED_IS_BB(p_hwfn->cdev)) pfid = abs_ppfid; else pfid = abs_ppfid * p_hwfn->cdev->num_ports_in_engine + MFW_PORT(p_hwfn); /* Filter enable - should be done first when removing a filter */ if (!p_details->enable) { qed_fid_pretend(p_hwfn, p_ptt, pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT); addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4; qed_wr(p_hwfn, p_ptt, addr, p_details->enable); qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT); } /* Filter value */ addr = NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * filter_idx * 0x4; SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_PF_VALID, 0x1); params.dst_pfid = pfid; rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&p_details->value, addr, 2 /* size_in_dwords */, &params); if (rc) return rc; qed_fid_pretend(p_hwfn, p_ptt, pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT); /* Filter mode */ addr = NIG_REG_LLH_FUNC_FILTER_MODE + filter_idx * 0x4; qed_wr(p_hwfn, p_ptt, addr, p_details->mode); /* Filter protocol type */ addr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + filter_idx * 0x4; qed_wr(p_hwfn, p_ptt, addr, p_details->protocol_type); /* Filter header select */ addr = NIG_REG_LLH_FUNC_FILTER_HDR_SEL + filter_idx * 0x4; qed_wr(p_hwfn, p_ptt, addr, p_details->hdr_sel); /* Filter enable - should be done last when adding a filter */ if (p_details->enable) { addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4; qed_wr(p_hwfn, p_ptt, addr, p_details->enable); } qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT); return 0; } static int qed_llh_add_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type, u32 high, u32 low) { struct qed_llh_filter_details filter_details; filter_details.enable = 1; filter_details.value = ((u64)high << 32) | low; filter_details.hdr_sel = 0; 
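	/* The 64-bit filter value is built from the caller-supplied
	 * {high, low} pair: qed_llh_add_mac_filter() packs the MAC address
	 * into it, while qed_llh_add_protocol_filter() uses the ethtype/port
	 * encoding produced by qed_llh_protocol_filter_to_hilo().
	 */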
filter_details.protocol_type = filter_prot_type; /* Mode: 0: MAC-address classification 1: protocol classification */ filter_details.mode = filter_prot_type ? 1 : 0; return qed_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx, &filter_details); } static int qed_llh_remove_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx) { struct qed_llh_filter_details filter_details = {0}; return qed_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx, &filter_details); } int qed_llh_add_mac_filter(struct qed_dev *cdev, u8 ppfid, const u8 mac_addr[ETH_ALEN]) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); union qed_llh_filter filter = {}; u8 filter_idx, abs_ppfid = 0; u32 high, low, ref_cnt; int rc = 0; if (!p_ptt) return -EAGAIN; if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits)) goto out; memcpy(filter.mac.addr, mac_addr, ETH_ALEN); rc = qed_llh_shadow_add_filter(cdev, ppfid, QED_LLH_FILTER_TYPE_MAC, &filter, &filter_idx, &ref_cnt); if (rc) goto err; /* Configure the LLH only in case of a new the filter */ if (ref_cnt == 1) { rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid); if (rc) goto err; high = mac_addr[1] | (mac_addr[0] << 8); low = mac_addr[5] | (mac_addr[4] << 8) | (mac_addr[3] << 16) | (mac_addr[2] << 24); rc = qed_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx, 0, high, low); if (rc) goto err; } DP_VERBOSE(cdev, QED_MSG_SP, "LLH: Added MAC filter [%pM] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n", mac_addr, ppfid, abs_ppfid, filter_idx, ref_cnt); goto out; err: DP_NOTICE(cdev, "LLH: Failed to add MAC filter [%pM] to ppfid %hhd\n", mac_addr, ppfid); out: qed_ptt_release(p_hwfn, p_ptt); return rc; } static int qed_llh_protocol_filter_stringify(struct qed_dev *cdev, enum qed_llh_prot_filter_type_t type, u16 source_port_or_eth_type, u16 dest_port, u8 *str, size_t str_len) { switch (type) { case QED_LLH_FILTER_ETHERTYPE: snprintf(str, str_len, "Ethertype 0x%04x", source_port_or_eth_type); break; case QED_LLH_FILTER_TCP_SRC_PORT: snprintf(str, str_len, "TCP src port 0x%04x", source_port_or_eth_type); break; case QED_LLH_FILTER_UDP_SRC_PORT: snprintf(str, str_len, "UDP src port 0x%04x", source_port_or_eth_type); break; case QED_LLH_FILTER_TCP_DEST_PORT: snprintf(str, str_len, "TCP dst port 0x%04x", dest_port); break; case QED_LLH_FILTER_UDP_DEST_PORT: snprintf(str, str_len, "UDP dst port 0x%04x", dest_port); break; case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT: snprintf(str, str_len, "TCP src/dst ports 0x%04x/0x%04x", source_port_or_eth_type, dest_port); break; case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT: snprintf(str, str_len, "UDP src/dst ports 0x%04x/0x%04x", source_port_or_eth_type, dest_port); break; default: DP_NOTICE(cdev, "Non valid LLH protocol filter type %d\n", type); return -EINVAL; } return 0; } static int qed_llh_protocol_filter_to_hilo(struct qed_dev *cdev, enum qed_llh_prot_filter_type_t type, u16 source_port_or_eth_type, u16 dest_port, u32 *p_high, u32 *p_low) { *p_high = 0; *p_low = 0; switch (type) { case QED_LLH_FILTER_ETHERTYPE: *p_high = source_port_or_eth_type; break; case QED_LLH_FILTER_TCP_SRC_PORT: case QED_LLH_FILTER_UDP_SRC_PORT: *p_low = source_port_or_eth_type << 16; break; case QED_LLH_FILTER_TCP_DEST_PORT: case QED_LLH_FILTER_UDP_DEST_PORT: *p_low = dest_port; break; case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT: case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT: *p_low = (source_port_or_eth_type << 16) | dest_port; break; default: DP_NOTICE(cdev, "Non valid LLH protocol filter 
type %d\n", type); return -EINVAL; } return 0; } int qed_llh_add_protocol_filter(struct qed_dev *cdev, u8 ppfid, enum qed_llh_prot_filter_type_t type, u16 source_port_or_eth_type, u16 dest_port) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); u8 filter_idx, abs_ppfid, str[32], type_bitmap; union qed_llh_filter filter = {}; u32 high, low, ref_cnt; int rc = 0; if (!p_ptt) return -EAGAIN; if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits)) goto out; rc = qed_llh_protocol_filter_stringify(cdev, type, source_port_or_eth_type, dest_port, str, sizeof(str)); if (rc) goto err; filter.protocol.type = type; filter.protocol.source_port_or_eth_type = source_port_or_eth_type; filter.protocol.dest_port = dest_port; rc = qed_llh_shadow_add_filter(cdev, ppfid, QED_LLH_FILTER_TYPE_PROTOCOL, &filter, &filter_idx, &ref_cnt); if (rc) goto err; rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid); if (rc) goto err; /* Configure the LLH only in case of a new the filter */ if (ref_cnt == 1) { rc = qed_llh_protocol_filter_to_hilo(cdev, type, source_port_or_eth_type, dest_port, &high, &low); if (rc) goto err; type_bitmap = 0x1 << type; rc = qed_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx, type_bitmap, high, low); if (rc) goto err; } DP_VERBOSE(cdev, QED_MSG_SP, "LLH: Added protocol filter [%s] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n", str, ppfid, abs_ppfid, filter_idx, ref_cnt); goto out; err: DP_NOTICE(p_hwfn, "LLH: Failed to add protocol filter [%s] to ppfid %hhd\n", str, ppfid); out: qed_ptt_release(p_hwfn, p_ptt); return rc; } void qed_llh_remove_mac_filter(struct qed_dev *cdev, u8 ppfid, u8 mac_addr[ETH_ALEN]) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); union qed_llh_filter filter = {}; u8 filter_idx, abs_ppfid; int rc = 0; u32 ref_cnt; if (!p_ptt) return; if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits)) goto out; if (QED_IS_NVMETCP_PERSONALITY(p_hwfn)) return; ether_addr_copy(filter.mac.addr, mac_addr); rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx, &ref_cnt); if (rc) goto err; rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid); if (rc) goto err; /* Remove from the LLH in case the filter is not in use */ if (!ref_cnt) { rc = qed_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx); if (rc) goto err; } DP_VERBOSE(cdev, QED_MSG_SP, "LLH: Removed MAC filter [%pM] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n", mac_addr, ppfid, abs_ppfid, filter_idx, ref_cnt); goto out; err: DP_NOTICE(cdev, "LLH: Failed to remove MAC filter [%pM] from ppfid %hhd\n", mac_addr, ppfid); out: qed_ptt_release(p_hwfn, p_ptt); } void qed_llh_remove_protocol_filter(struct qed_dev *cdev, u8 ppfid, enum qed_llh_prot_filter_type_t type, u16 source_port_or_eth_type, u16 dest_port) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); u8 filter_idx, abs_ppfid, str[32]; union qed_llh_filter filter = {}; int rc = 0; u32 ref_cnt; if (!p_ptt) return; if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits)) goto out; rc = qed_llh_protocol_filter_stringify(cdev, type, source_port_or_eth_type, dest_port, str, sizeof(str)); if (rc) goto err; filter.protocol.type = type; filter.protocol.source_port_or_eth_type = source_port_or_eth_type; filter.protocol.dest_port = dest_port; rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx, &ref_cnt); if (rc) goto err; rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid); if (rc) goto err; /* Remove from 
the LLH in case the filter is not in use */ if (!ref_cnt) { rc = qed_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx); if (rc) goto err; } DP_VERBOSE(cdev, QED_MSG_SP, "LLH: Removed protocol filter [%s] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n", str, ppfid, abs_ppfid, filter_idx, ref_cnt); goto out; err: DP_NOTICE(cdev, "LLH: Failed to remove protocol filter [%s] from ppfid %hhd\n", str, ppfid); out: qed_ptt_release(p_hwfn, p_ptt); } /******************************* NIG LLH - End ********************************/ #define QED_MIN_DPIS (4) #define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS) static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum BAR_ID bar_id) { u32 bar_reg = (bar_id == BAR_ID_0 ? PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); u32 val; if (IS_VF(p_hwfn->cdev)) return qed_vf_hw_bar_size(p_hwfn, bar_id); val = qed_rd(p_hwfn, p_ptt, bar_reg); if (val) return 1 << (val + 15); /* Old MFW initialized above registered only conditionally */ if (p_hwfn->cdev->num_hwfns > 1) { DP_INFO(p_hwfn, "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n"); return BAR_ID_0 ? 256 * 1024 : 512 * 1024; } else { DP_INFO(p_hwfn, "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n"); return 512 * 1024; } } void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level) { u32 i; cdev->dp_level = dp_level; cdev->dp_module = dp_module; for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; p_hwfn->dp_level = dp_level; p_hwfn->dp_module = dp_module; } } void qed_init_struct(struct qed_dev *cdev) { u8 i; for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; p_hwfn->cdev = cdev; p_hwfn->my_id = i; p_hwfn->b_active = false; mutex_init(&p_hwfn->dmae_info.mutex); } /* hwfn 0 is always active */ cdev->hwfns[0].b_active = true; /* set the default cache alignment to 128 */ cdev->cache_shift = 7; } static void qed_qm_info_free(struct qed_hwfn *p_hwfn) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; kfree(qm_info->qm_pq_params); qm_info->qm_pq_params = NULL; kfree(qm_info->qm_vport_params); qm_info->qm_vport_params = NULL; kfree(qm_info->qm_port_params); qm_info->qm_port_params = NULL; kfree(qm_info->wfq_data); qm_info->wfq_data = NULL; } static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn) { kfree(p_hwfn->dbg_user_info); p_hwfn->dbg_user_info = NULL; } void qed_resc_free(struct qed_dev *cdev) { struct qed_rdma_info *rdma_info; struct qed_hwfn *p_hwfn; int i; if (IS_VF(cdev)) { for_each_hwfn(cdev, i) qed_l2_free(&cdev->hwfns[i]); return; } kfree(cdev->fw_data); cdev->fw_data = NULL; kfree(cdev->reset_stats); cdev->reset_stats = NULL; qed_llh_free(cdev); for_each_hwfn(cdev, i) { p_hwfn = cdev->hwfns + i; rdma_info = p_hwfn->p_rdma_info; qed_cxt_mngr_free(p_hwfn); qed_qm_info_free(p_hwfn); qed_spq_free(p_hwfn); qed_eq_free(p_hwfn); qed_consq_free(p_hwfn); qed_int_free(p_hwfn); #ifdef CONFIG_QED_LL2 qed_ll2_free(p_hwfn); #endif if (p_hwfn->hw_info.personality == QED_PCI_FCOE) qed_fcoe_free(p_hwfn); if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { qed_iscsi_free(p_hwfn); qed_ooo_free(p_hwfn); } if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) { qed_nvmetcp_free(p_hwfn); qed_ooo_free(p_hwfn); } if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) { qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto); qed_rdma_info_free(p_hwfn); } qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON); qed_iov_free(p_hwfn); qed_l2_free(p_hwfn); 
qed_dmae_info_free(p_hwfn); qed_dcbx_info_free(p_hwfn); qed_dbg_user_data_free(p_hwfn); qed_fw_overlay_mem_free(p_hwfn, &p_hwfn->fw_overlay_mem); /* Destroy doorbell recovery mechanism */ qed_db_recovery_teardown(p_hwfn); } } /******************** QM initialization *******************/ #define ACTIVE_TCS_BMAP 0x9f #define ACTIVE_TCS_BMAP_4PORT_K2 0xf /* determines the physical queue flags for a given PF. */ static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn) { u32 flags; /* common flags */ flags = PQ_FLAGS_LB; /* feature flags */ if (IS_QED_SRIOV(p_hwfn->cdev)) flags |= PQ_FLAGS_VFS; /* protocol flags */ switch (p_hwfn->hw_info.personality) { case QED_PCI_ETH: flags |= PQ_FLAGS_MCOS; break; case QED_PCI_FCOE: flags |= PQ_FLAGS_OFLD; break; case QED_PCI_ISCSI: case QED_PCI_NVMETCP: flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; break; case QED_PCI_ETH_ROCE: flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT; if (IS_QED_MULTI_TC_ROCE(p_hwfn)) flags |= PQ_FLAGS_MTC; break; case QED_PCI_ETH_IWARP: flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; break; default: DP_ERR(p_hwfn, "unknown personality %d\n", p_hwfn->hw_info.personality); return 0; } return flags; } /* Getters for resource amounts necessary for qm initialization */ static u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn) { return p_hwfn->hw_info.num_hw_tc; } static u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn) { return IS_QED_SRIOV(p_hwfn->cdev) ? p_hwfn->cdev->p_iov_info->total_vfs : 0; } static u8 qed_init_qm_get_num_mtc_tcs(struct qed_hwfn *p_hwfn) { u32 pq_flags = qed_get_pq_flags(p_hwfn); if (!(PQ_FLAGS_MTC & pq_flags)) return 1; return qed_init_qm_get_num_tcs(p_hwfn); } #define NUM_DEFAULT_RLS 1 static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn) { u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn); /* num RLs can't exceed resource amount of rls or vports */ num_pf_rls = (u16)min_t(u32, RESC_NUM(p_hwfn, QED_RL), RESC_NUM(p_hwfn, QED_VPORT)); /* Make sure after we reserve there's something left */ if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) return 0; /* subtract rls necessary for VFs and one default one for the PF */ num_pf_rls -= num_vfs + NUM_DEFAULT_RLS; return num_pf_rls; } static u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn) { u32 pq_flags = qed_get_pq_flags(p_hwfn); /* all pqs share the same vport, except for vfs and pf_rl pqs */ return (!!(PQ_FLAGS_RLS & pq_flags)) * qed_init_qm_get_num_pf_rls(p_hwfn) + (!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn) + 1; } /* calc amount of PQs according to the requested flags */ static u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn) { u32 pq_flags = qed_get_pq_flags(p_hwfn); return (!!(PQ_FLAGS_RLS & pq_flags)) * qed_init_qm_get_num_pf_rls(p_hwfn) + (!!(PQ_FLAGS_MCOS & pq_flags)) * qed_init_qm_get_num_tcs(p_hwfn) + (!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) + (!!(PQ_FLAGS_ACK & pq_flags)) + (!!(PQ_FLAGS_OFLD & pq_flags)) * qed_init_qm_get_num_mtc_tcs(p_hwfn) + (!!(PQ_FLAGS_LLT & pq_flags)) * qed_init_qm_get_num_mtc_tcs(p_hwfn) + (!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn); } /* initialize the top level QM params */ static void qed_init_qm_params(struct qed_hwfn *p_hwfn) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; bool four_port; /* pq and vport bases for this PF */ qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ); qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT); /* rate limiting and weighted fair queueing are always enabled */ 
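	/* (The per-vport WFQ weights themselves are assigned in
	 * qed_init_qm_vport_params(), and rate limiting is attached per-PQ
	 * via the PQ_INIT_PF_RL/PQ_INIT_VF_RL flags in qed_init_qm_pq().)
	 */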
qm_info->vport_rl_en = true; qm_info->vport_wfq_en = true; /* TC config is different for AH 4 port */ four_port = p_hwfn->cdev->num_ports_in_engine == MAX_NUM_PORTS_K2; /* in AH 4 port we have fewer TCs per port */ qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 : NUM_OF_PHYS_TCS; /* unless MFW indicated otherwise, ooo_tc == 3 for * AH 4-port and 4 otherwise. */ if (!qm_info->ooo_tc) qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC : DCBX_TCP_OOO_TC; } /* initialize qm vport params */ static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; u8 i; /* all vports participate in weighted fair queueing */ for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++) qm_info->qm_vport_params[i].wfq = 1; } /* initialize qm port params */ static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn) { /* Initialize qm port parameters */ u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine; struct qed_dev *cdev = p_hwfn->cdev; /* indicate how ooo and high pri traffic is dealt with */ active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ? ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP; for (i = 0; i < num_ports; i++) { struct init_qm_port_params *p_qm_port = &p_hwfn->qm_info.qm_port_params[i]; u16 pbf_max_cmd_lines; p_qm_port->active = 1; p_qm_port->active_phys_tcs = active_phys_tcs; pbf_max_cmd_lines = (u16)NUM_OF_PBF_CMD_LINES(cdev); p_qm_port->num_pbf_cmd_lines = pbf_max_cmd_lines / num_ports; p_qm_port->num_btb_blocks = NUM_OF_BTB_BLOCKS(cdev) / num_ports; } } /* Reset the params which must be reset for qm init. QM init may be called as * a result of flows other than driver load (e.g. dcbx renegotiation). Other * params may be affected by the init but would simply recalculate to the same * values. The allocations made for QM init, ports, vports, pqs and vfqs are not * affected as these amounts stay the same. */ static void qed_init_qm_reset_params(struct qed_hwfn *p_hwfn) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; qm_info->num_pqs = 0; qm_info->num_vports = 0; qm_info->num_pf_rls = 0; qm_info->num_vf_pqs = 0; qm_info->first_vf_pq = 0; qm_info->first_mcos_pq = 0; qm_info->first_rl_pq = 0; } static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; qm_info->num_vports++; if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn)) DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn)); } /* initialize a single pq and manage qm_info resources accounting. * The pq_init_flags param determines whether the PQ is rate limited * (for VF or PF) and whether a new vport is allocated to the pq or not * (i.e. vport will be shared). 
*/ /* flags for pq init */ #define PQ_INIT_SHARE_VPORT BIT(0) #define PQ_INIT_PF_RL BIT(1) #define PQ_INIT_VF_RL BIT(2) /* defines for pq init */ #define PQ_INIT_DEFAULT_WRR_GROUP 1 #define PQ_INIT_DEFAULT_TC 0 void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc) { p_info->offload_tc = tc; p_info->offload_tc_set = true; } static bool qed_is_offload_tc_set(struct qed_hwfn *p_hwfn) { return p_hwfn->hw_info.offload_tc_set; } static u32 qed_get_offload_tc(struct qed_hwfn *p_hwfn) { if (qed_is_offload_tc_set(p_hwfn)) return p_hwfn->hw_info.offload_tc; return PQ_INIT_DEFAULT_TC; } static void qed_init_qm_pq(struct qed_hwfn *p_hwfn, struct qed_qm_info *qm_info, u8 tc, u32 pq_init_flags) { u16 pq_idx = qm_info->num_pqs, max_pq = qed_init_qm_get_num_pqs(p_hwfn); if (pq_idx > max_pq) DP_ERR(p_hwfn, "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq); /* init pq params */ qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id; qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + qm_info->num_vports; qm_info->qm_pq_params[pq_idx].tc_id = tc; qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP; qm_info->qm_pq_params[pq_idx].rl_valid = (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL); /* qm params accounting */ qm_info->num_pqs++; if (!(pq_init_flags & PQ_INIT_SHARE_VPORT)) qm_info->num_vports++; if (pq_init_flags & PQ_INIT_PF_RL) qm_info->num_pf_rls++; if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn)) DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn)); if (qm_info->num_pf_rls > qed_init_qm_get_num_pf_rls(p_hwfn)) DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n", qm_info->num_pf_rls, qed_init_qm_get_num_pf_rls(p_hwfn)); } /* get pq index according to PQ_FLAGS */ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, unsigned long pq_flags) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; /* Can't have multiple flags set here */ if (bitmap_weight(&pq_flags, sizeof(pq_flags) * BITS_PER_BYTE) > 1) { DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags); goto err; } if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) { DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags); goto err; } switch (pq_flags) { case PQ_FLAGS_RLS: return &qm_info->first_rl_pq; case PQ_FLAGS_MCOS: return &qm_info->first_mcos_pq; case PQ_FLAGS_LB: return &qm_info->pure_lb_pq; case PQ_FLAGS_OOO: return &qm_info->ooo_pq; case PQ_FLAGS_ACK: return &qm_info->pure_ack_pq; case PQ_FLAGS_OFLD: return &qm_info->first_ofld_pq; case PQ_FLAGS_LLT: return &qm_info->first_llt_pq; case PQ_FLAGS_VFS: return &qm_info->first_vf_pq; default: goto err; } err: return &qm_info->start_pq; } /* save pq index in qm info */ static void qed_init_qm_set_idx(struct qed_hwfn *p_hwfn, u32 pq_flags, u16 pq_val) { u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags); *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val; } /* get tx pq index, with the PQ TX base already set (ready for context init) */ u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags) { u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags); return *base_pq_idx + CM_TX_PQ_BASE; } u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc) { u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn); if (max_tc == 0) { DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n", PQ_FLAGS_MCOS); return p_hwfn->qm_info.start_pq; } if (tc > max_tc) DP_ERR(p_hwfn, "tc %d must be 
smaller than %d\n", tc, max_tc); return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc); } u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf) { u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn); if (max_vf == 0) { DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n", PQ_FLAGS_VFS); return p_hwfn->qm_info.start_pq; } if (vf > max_vf) DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf); } u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc) { u16 first_ofld_pq, pq_offset; first_ofld_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ? tc : PQ_INIT_DEFAULT_TC; return first_ofld_pq + pq_offset; } u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc) { u16 first_llt_pq, pq_offset; first_llt_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LLT); pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ? tc : PQ_INIT_DEFAULT_TC; return first_llt_pq + pq_offset; } /* Functions for creating specific types of pqs */ static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LB)) return; qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs); qed_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT); } static void qed_init_qm_ooo_pq(struct qed_hwfn *p_hwfn) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO)) return; qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs); qed_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT); } static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK)) return; qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs); qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn), PQ_INIT_SHARE_VPORT); } static void qed_init_qm_mtc_pqs(struct qed_hwfn *p_hwfn) { u8 num_tcs = qed_init_qm_get_num_mtc_tcs(p_hwfn); struct qed_qm_info *qm_info = &p_hwfn->qm_info; u8 tc; /* override pq's TC if offload TC is set */ for (tc = 0; tc < num_tcs; tc++) qed_init_qm_pq(p_hwfn, qm_info, qed_is_offload_tc_set(p_hwfn) ? 
p_hwfn->hw_info.offload_tc : tc, PQ_INIT_SHARE_VPORT); } static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD)) return; qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs); qed_init_qm_mtc_pqs(p_hwfn); } static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT)) return; qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs); qed_init_qm_mtc_pqs(p_hwfn); } static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; u8 tc_idx; if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS)) return; qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs); for (tc_idx = 0; tc_idx < qed_init_qm_get_num_tcs(p_hwfn); tc_idx++) qed_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT); } static void qed_init_qm_vf_pqs(struct qed_hwfn *p_hwfn) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; u16 vf_idx, num_vfs = qed_init_qm_get_num_vfs(p_hwfn); if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS)) return; qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs); qm_info->num_vf_pqs = num_vfs; for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL); } static void qed_init_qm_rl_pqs(struct qed_hwfn *p_hwfn) { u16 pf_rls_idx, num_pf_rls = qed_init_qm_get_num_pf_rls(p_hwfn); struct qed_qm_info *qm_info = &p_hwfn->qm_info; if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS)) return; qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs); for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++) qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn), PQ_INIT_PF_RL); } static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn) { /* rate limited pqs, must come first (FW assumption) */ qed_init_qm_rl_pqs(p_hwfn); /* pqs for multi cos */ qed_init_qm_mcos_pqs(p_hwfn); /* pure loopback pq */ qed_init_qm_lb_pq(p_hwfn); /* out of order pq */ qed_init_qm_ooo_pq(p_hwfn); /* pure ack pq */ qed_init_qm_pure_ack_pq(p_hwfn); /* pq for offloaded protocol */ qed_init_qm_offload_pq(p_hwfn); /* low latency pq */ qed_init_qm_low_latency_pq(p_hwfn); /* done sharing vports */ qed_init_qm_advance_vport(p_hwfn); /* pqs for vfs */ qed_init_qm_vf_pqs(p_hwfn); } /* compare values of getters against resources amounts */ static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn) { if (qed_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, QED_VPORT)) { DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n"); return -EINVAL; } if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ)) return 0; if (QED_IS_ROCE_PERSONALITY(p_hwfn)) { p_hwfn->hw_info.multi_tc_roce_en = false; DP_NOTICE(p_hwfn, "multi-tc roce was disabled to reduce requested amount of pqs\n"); if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ)) return 0; } DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n"); return -EINVAL; } static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct init_qm_vport_params *vport; struct init_qm_port_params *port; struct init_qm_pq_params *pq; int i, tc; /* top level params */ DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, llt_pq %d, pure_ack_pq %d\n", qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq, qm_info->first_ofld_pq, qm_info->first_llt_pq, 
qm_info->pure_ack_pq); DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n", qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs, qm_info->num_vf_pqs, qm_info->num_vports, qm_info->max_phys_tcs_per_port); DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n", qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en, qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl, qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn)); /* port table */ for (i = 0; i < p_hwfn->cdev->num_ports_in_engine; i++) { port = &(qm_info->qm_port_params[i]); DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n", i, port->active, port->active_phys_tcs, port->num_pbf_cmd_lines, port->num_btb_blocks, port->reserved); } /* vport table */ for (i = 0; i < qm_info->num_vports; i++) { vport = &(qm_info->qm_vport_params[i]); DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "vport idx %d, wfq %d, first_tx_pq_id [ ", qm_info->start_vport + i, vport->wfq); for (tc = 0; tc < NUM_OF_TCS; tc++) DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "%d ", vport->first_tx_pq_id[tc]); DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "]\n"); } /* pq table */ for (i = 0; i < qm_info->num_pqs; i++) { pq = &(qm_info->qm_pq_params[i]); DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d rl_id %d\n", qm_info->start_pq + i, pq->port_id, pq->vport_id, pq->tc_id, pq->wrr_group, pq->rl_valid, pq->rl_id); } } static void qed_init_qm_info(struct qed_hwfn *p_hwfn) { /* reset params required for init run */ qed_init_qm_reset_params(p_hwfn); /* init QM top level params */ qed_init_qm_params(p_hwfn); /* init QM port params */ qed_init_qm_port_params(p_hwfn); /* init QM vport params */ qed_init_qm_vport_params(p_hwfn); /* init QM physical queue params */ qed_init_qm_pq_params(p_hwfn); /* display all that init */ qed_dp_init_qm_params(p_hwfn); } /* This function reconfigures the QM pf on the fly. * For this purpose we: * 1. reconfigure the QM database * 2. set new values to runtime array * 3. send an sdm_qm_cmd through the rbc interface to stop the QM * 4. activate init tool in QM_PF stage * 5. 
send an sdm_qm_cmd through rbc interface to release the QM
 */
int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	bool b_rc;
	int rc;

	/* initialize qed's qm data structure */
	qed_init_qm_info(p_hwfn);

	/* stop PF's qm queues */
	spin_lock_bh(&qm_lock);
	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
				    qm_info->start_pq, qm_info->num_pqs);
	spin_unlock_bh(&qm_lock);
	if (!b_rc)
		return -EINVAL;

	/* prepare QM portion of runtime array */
	qed_qm_init_pf(p_hwfn, p_ptt, false);

	/* activate init tool on runtime array */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
			  p_hwfn->hw_info.hw_mode);
	if (rc)
		return rc;

	/* start PF's qm queues */
	spin_lock_bh(&qm_lock);
	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
				    qm_info->start_pq, qm_info->num_pqs);
	spin_unlock_bh(&qm_lock);
	if (!b_rc)
		return -EINVAL;

	return 0;
}

static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	int rc;

	rc = qed_init_qm_sanity(p_hwfn);
	if (rc)
		goto alloc_err;

	qm_info->qm_pq_params = kcalloc(qed_init_qm_get_num_pqs(p_hwfn),
					sizeof(*qm_info->qm_pq_params),
					GFP_KERNEL);
	if (!qm_info->qm_pq_params)
		goto alloc_err;

	qm_info->qm_vport_params = kcalloc(qed_init_qm_get_num_vports(p_hwfn),
					   sizeof(*qm_info->qm_vport_params),
					   GFP_KERNEL);
	if (!qm_info->qm_vport_params)
		goto alloc_err;

	qm_info->qm_port_params = kcalloc(p_hwfn->cdev->num_ports_in_engine,
					  sizeof(*qm_info->qm_port_params),
					  GFP_KERNEL);
	if (!qm_info->qm_port_params)
		goto alloc_err;

	qm_info->wfq_data = kcalloc(qed_init_qm_get_num_vports(p_hwfn),
				    sizeof(*qm_info->wfq_data), GFP_KERNEL);
	if (!qm_info->wfq_data)
		goto alloc_err;

	return 0;

alloc_err:
	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
	qed_qm_info_free(p_hwfn);
	return -ENOMEM;
}

int qed_resc_alloc(struct qed_dev *cdev)
{
	u32 rdma_tasks, excess_tasks;
	u32 line_count;
	int i, rc = 0;

	if (IS_VF(cdev)) {
		for_each_hwfn(cdev, i) {
			rc = qed_l2_alloc(&cdev->hwfns[i]);
			if (rc)
				return rc;
		}
		return rc;
	}

	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
	if (!cdev->fw_data)
		return -ENOMEM;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u32 n_eqes, num_cons;

		/* Initialize the doorbell recovery mechanism */
		rc = qed_db_recovery_setup(p_hwfn);
		if (rc)
			goto alloc_err;

		/* First allocate the context manager structure */
		rc = qed_cxt_mngr_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Set the HW cid/tid numbers (in the context manager)
		 * Must be done prior to any further computations.
		 */
		rc = qed_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS);
		if (rc)
			goto alloc_err;

		rc = qed_alloc_qm_data(p_hwfn);
		if (rc)
			goto alloc_err;

		/* init qm info */
		qed_init_qm_info(p_hwfn);

		/* Compute the ILT client partition */
		rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "too many ILT lines; re-computing with less lines\n");
			/* In case there are not enough ILT lines we reduce the
			 * number of RDMA tasks and re-compute.
			 */
			excess_tasks =
			    qed_cxt_cfg_ilt_compute_excess(p_hwfn, line_count);
			if (!excess_tasks)
				goto alloc_err;

			rdma_tasks = RDMA_MAX_TIDS - excess_tasks;
			rc = qed_cxt_set_pf_params(p_hwfn, rdma_tasks);
			if (rc)
				goto alloc_err;

			rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
			if (rc) {
				DP_ERR(p_hwfn,
				       "failed ILT compute. Requested too many lines: %u\n",
				       line_count);
				goto alloc_err;
			}
		}

		/* CID map / ILT shadow table / T2
		 * The tables' sizes are determined by the computations above
		 */
		rc = qed_cxt_tables_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SPQ, must follow ILT because initializes SPQ context */
		rc = qed_spq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SP status block allocation */
		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
							 RESERVED_PTT_DPC);

		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			goto alloc_err;

		rc = qed_iov_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* EQ */
		n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
		if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
			u32 n_srq = qed_cxt_get_total_srq_count(p_hwfn);
			enum protocol_type rdma_proto;

			if (QED_IS_ROCE_PERSONALITY(p_hwfn))
				rdma_proto = PROTOCOLID_ROCE;
			else
				rdma_proto = PROTOCOLID_IWARP;

			num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
							       rdma_proto,
							       NULL) * 2;

			/* EQ should be able to get events from all SRQ's
			 * at the same time
			 */
			n_eqes += num_cons + 2 * MAX_NUM_VFS_BB + n_srq;
		} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI ||
			   p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
			num_cons =
			    qed_cxt_get_proto_cid_count(p_hwfn,
							PROTOCOLID_TCP_ULP,
							NULL);
			n_eqes += 2 * num_cons;
		}

		if (n_eqes > 0xFFFF) {
			DP_ERR(p_hwfn,
			       "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
			       n_eqes, 0xFFFF);
			goto alloc_no_mem;
		}

		rc = qed_eq_alloc(p_hwfn, (u16)n_eqes);
		if (rc)
			goto alloc_err;

		rc = qed_consq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		rc = qed_l2_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

#ifdef CONFIG_QED_LL2
		if (p_hwfn->using_ll2) {
			rc = qed_ll2_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}
#endif

		if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
			rc = qed_fcoe_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}

		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			rc = qed_iscsi_alloc(p_hwfn);
			if (rc)
				goto alloc_err;

			rc = qed_ooo_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}

		if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
			rc = qed_nvmetcp_alloc(p_hwfn);
			if (rc)
				goto alloc_err;

			rc = qed_ooo_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}

		if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
			rc = qed_rdma_info_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}

		/* DMA info initialization */
		rc = qed_dmae_info_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* DCBX initialization */
		rc = qed_dcbx_info_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		rc = qed_dbg_alloc_user_data(p_hwfn, &p_hwfn->dbg_user_info);
		if (rc)
			goto alloc_err;
	}

	rc = qed_llh_alloc(cdev);
	if (rc) {
		DP_NOTICE(cdev,
			  "Failed to allocate memory for the llh_info structure\n");
		goto alloc_err;
	}

	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
	if (!cdev->reset_stats)
		goto alloc_no_mem;

	return 0;

alloc_no_mem:
	rc = -ENOMEM;
alloc_err:
	qed_resc_free(cdev);
	return rc;
}

static int qed_fw_err_handler(struct qed_hwfn *p_hwfn, u8 opcode, u16 echo,
			      union event_ring_data *data, u8 fw_return_code)
{
	if (fw_return_code != COMMON_ERR_CODE_ERROR)
		goto eqe_unexpected;

	if (data->err_data.recovery_scope == ERR_SCOPE_FUNC &&
	    le16_to_cpu(data->err_data.entity_id) >= MAX_NUM_PFS) {
		qed_sriov_vfpf_malicious(p_hwfn, &data->err_data);
		return 0;
	}

eqe_unexpected:
	DP_ERR(p_hwfn,
	       "Skipping unexpected eqe 0x%02x, FW return code 0x%x, echo 0x%x\n",
	       opcode, fw_return_code, echo);
	return -EINVAL;
}

static int qed_common_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode,
				__le16 echo, union event_ring_data *data,
				u8 fw_return_code)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
	case COMMON_EVENT_VF_FLR:
		return qed_sriov_eqe_event(p_hwfn, opcode, 
echo, data, fw_return_code); case COMMON_EVENT_FW_ERROR: return qed_fw_err_handler(p_hwfn, opcode, le16_to_cpu(echo), data, fw_return_code); default: DP_INFO(p_hwfn->cdev, "Unknown eqe event 0x%02x, echo 0x%x\n", opcode, echo); return -EINVAL; } } void qed_resc_setup(struct qed_dev *cdev) { int i; if (IS_VF(cdev)) { for_each_hwfn(cdev, i) qed_l2_setup(&cdev->hwfns[i]); return; } for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; qed_cxt_mngr_setup(p_hwfn); qed_spq_setup(p_hwfn); qed_eq_setup(p_hwfn); qed_consq_setup(p_hwfn); /* Read shadow of current MFW mailbox */ qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt); memcpy(p_hwfn->mcp_info->mfw_mb_shadow, p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length); qed_int_setup(p_hwfn, p_hwfn->p_main_ptt); qed_l2_setup(p_hwfn); qed_iov_setup(p_hwfn); qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON, qed_common_eqe_event); #ifdef CONFIG_QED_LL2 if (p_hwfn->using_ll2) qed_ll2_setup(p_hwfn); #endif if (p_hwfn->hw_info.personality == QED_PCI_FCOE) qed_fcoe_setup(p_hwfn); if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { qed_iscsi_setup(p_hwfn); qed_ooo_setup(p_hwfn); } if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) { qed_nvmetcp_setup(p_hwfn); qed_ooo_setup(p_hwfn); } } } #define FINAL_CLEANUP_POLL_CNT (100) #define FINAL_CLEANUP_POLL_TIME (10) int qed_final_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 id, bool is_vf) { u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; int rc = -EBUSY; addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, USTORM_FLR_FINAL_ACK, p_hwfn->rel_pf_id); if (is_vf) id += 0x10; command |= X_FINAL_CLEANUP_AGG_INT << SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT; command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT; /* Make sure notification is not set before initiating final cleanup */ if (REG_RD(p_hwfn, addr)) { DP_NOTICE(p_hwfn, "Unexpected; Found final cleanup notification before initiating final cleanup\n"); REG_WR(p_hwfn, addr, 0); } DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Sending final cleanup for PFVF[%d] [Command %08x]\n", id, command); qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command); /* Poll until completion */ while (!REG_RD(p_hwfn, addr) && count--) msleep(FINAL_CLEANUP_POLL_TIME); if (REG_RD(p_hwfn, addr)) rc = 0; else DP_NOTICE(p_hwfn, "Failed to receive FW final cleanup notification\n"); /* Cleanup afterwards */ REG_WR(p_hwfn, addr, 0); return rc; } static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn) { int hw_mode = 0; if (QED_IS_BB_B0(p_hwfn->cdev)) { hw_mode |= 1 << MODE_BB; } else if (QED_IS_AH(p_hwfn->cdev)) { hw_mode |= 1 << MODE_K2; } else { DP_NOTICE(p_hwfn, "Unknown chip type %#x\n", p_hwfn->cdev->type); return -EINVAL; } switch (p_hwfn->cdev->num_ports_in_engine) { case 1: hw_mode |= 1 << MODE_PORTS_PER_ENG_1; break; case 2: hw_mode |= 1 << MODE_PORTS_PER_ENG_2; break; case 4: hw_mode |= 1 << MODE_PORTS_PER_ENG_4; break; default: DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n", p_hwfn->cdev->num_ports_in_engine); return -EINVAL; } if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) hw_mode |= 1 << MODE_MF_SD; else hw_mode |= 1 << MODE_MF_SI; hw_mode |= 1 << MODE_ASIC; if (p_hwfn->cdev->num_hwfns > 1) hw_mode |= 1 << MODE_100G; p_hwfn->hw_info.hw_mode = hw_mode; DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP), "Configuring function for hw_mode: 0x%08x\n", p_hwfn->hw_info.hw_mode); return 0; } /* Init 
run time data for all PFs on an engine. */ static void qed_init_cau_rt_data(struct qed_dev *cdev) { u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET; int i, igu_sb_id; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_igu_info *p_igu_info; struct qed_igu_block *p_block; struct cau_sb_entry sb_entry; p_igu_info = p_hwfn->hw_info.p_igu_info; for (igu_sb_id = 0; igu_sb_id < QED_MAPPING_MEMORY_SIZE(cdev); igu_sb_id++) { p_block = &p_igu_info->entry[igu_sb_id]; if (!p_block->is_pf) continue; qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_block->function_id, 0, 0); STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2, sb_entry); } } } static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 val, wr_mbs, cache_line_size; val = qed_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0); switch (val) { case 0: wr_mbs = 128; break; case 1: wr_mbs = 256; break; case 2: wr_mbs = 512; break; default: DP_INFO(p_hwfn, "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", val); return; } cache_line_size = min_t(u32, L1_CACHE_BYTES, wr_mbs); switch (cache_line_size) { case 32: val = 0; break; case 64: val = 1; break; case 128: val = 2; break; case 256: val = 3; break; default: DP_INFO(p_hwfn, "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", cache_line_size); } if (wr_mbs < L1_CACHE_BYTES) DP_INFO(p_hwfn, "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", L1_CACHE_BYTES, wr_mbs); STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val); if (val > 0) { STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val); STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val); } } static int qed_hw_init_common(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int hw_mode) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct qed_qm_common_rt_init_params *params; struct qed_dev *cdev = p_hwfn->cdev; u8 vf_id, max_num_vfs; u16 num_pfs, pf_id; u32 concrete_fid; int rc = 0; params = kzalloc(sizeof(*params), GFP_KERNEL); if (!params) { DP_NOTICE(p_hwfn->cdev, "Failed to allocate common init params\n"); return -ENOMEM; } qed_init_cau_rt_data(cdev); /* Program GTT windows */ qed_gtt_init(p_hwfn); if (p_hwfn->mcp_info) { if (p_hwfn->mcp_info->func_info.bandwidth_max) qm_info->pf_rl_en = true; if (p_hwfn->mcp_info->func_info.bandwidth_min) qm_info->pf_wfq_en = true; } params->max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; params->max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; params->pf_rl_en = qm_info->pf_rl_en; params->pf_wfq_en = qm_info->pf_wfq_en; params->global_rl_en = qm_info->vport_rl_en; params->vport_wfq_en = qm_info->vport_wfq_en; params->port_params = qm_info->qm_port_params; qed_qm_common_rt_init(p_hwfn, params); qed_cxt_hw_init_common(p_hwfn); qed_init_cache_line_size(p_hwfn, p_ptt); rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); if (rc) goto out; qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); if (QED_IS_BB(p_hwfn->cdev)) { num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev); for (pf_id = 0; pf_id < num_pfs; pf_id++) { qed_fid_pretend(p_hwfn, p_ptt, pf_id); qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); } /* pretend to original PF */ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); } max_num_vfs = QED_IS_AH(cdev) ? 
MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB; for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid); qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0); qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1); qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0); } /* pretend to original PF */ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); out: kfree(params); return rc; } static int qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus) { u32 dpi_bit_shift, dpi_count, dpi_page_size; u32 min_dpis; u32 n_wids; /* Calculate DPI size */ n_wids = max_t(u32, QED_MIN_WIDS, n_cpus); dpi_page_size = QED_WID_SIZE * roundup_pow_of_two(n_wids); dpi_page_size = (dpi_page_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); dpi_bit_shift = ilog2(dpi_page_size / 4096); dpi_count = pwm_region_size / dpi_page_size; min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis; min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis); p_hwfn->dpi_size = dpi_page_size; p_hwfn->dpi_count = dpi_count; qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift); if (dpi_count < min_dpis) return -EINVAL; return 0; } enum QED_ROCE_EDPM_MODE { QED_ROCE_EDPM_MODE_ENABLE = 0, QED_ROCE_EDPM_MODE_FORCE_ON = 1, QED_ROCE_EDPM_MODE_DISABLE = 2, }; bool qed_edpm_enabled(struct qed_hwfn *p_hwfn) { if (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) return false; return true; } static int qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 pwm_regsize, norm_regsize; u32 non_pwm_conn, min_addr_reg1; u32 db_bar_size, n_cpus = 1; u32 roce_edpm_mode; u32 pf_dems_shift; int rc = 0; u8 cond; db_bar_size = qed_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1); if (p_hwfn->cdev->num_hwfns > 1) db_bar_size /= 2; /* Calculate doorbell regions */ non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) + qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE, NULL) + qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, NULL); norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, PAGE_SIZE); min_addr_reg1 = norm_regsize / 4096; pwm_regsize = db_bar_size - norm_regsize; /* Check that the normal and PWM sizes are valid */ if (db_bar_size < norm_regsize) { DP_ERR(p_hwfn->cdev, "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n", db_bar_size, norm_regsize); return -EINVAL; } if (pwm_regsize < QED_MIN_PWM_REGION) { DP_ERR(p_hwfn->cdev, "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n", pwm_regsize, QED_MIN_PWM_REGION, db_bar_size, norm_regsize); return -EINVAL; } /* Calculate number of DPIs */ roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode; if ((roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE) || ((roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON))) { /* Either EDPM is mandatory, or we are attempting to allocate a * WID per CPU. */ n_cpus = num_present_cpus(); rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); } cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) || (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE); if (cond || p_hwfn->dcbx_no_edpm) { /* Either EDPM is disabled from user configuration, or it is * disabled via DCBx, or it is not mandatory and we failed to * allocated a WID per CPU. 
*/ n_cpus = 1; rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); if (cond) qed_rdma_dpm_bar(p_hwfn, p_ptt); } p_hwfn->wid_count = (u16)n_cpus; DP_INFO(p_hwfn, "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n", norm_regsize, pwm_regsize, p_hwfn->dpi_size, p_hwfn->dpi_count, (!qed_edpm_enabled(p_hwfn)) ? "disabled" : "enabled", PAGE_SIZE); if (rc) { DP_ERR(p_hwfn, "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n", p_hwfn->dpi_count, p_hwfn->pf_params.rdma_pf_params.min_dpis); return -EINVAL; } p_hwfn->dpi_start_offset = norm_regsize; /* DEMS size is configured log2 of DWORDs, hence the division by 4 */ pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4); qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift); qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1); return 0; } static int qed_hw_init_port(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int hw_mode) { int rc = 0; /* In CMT the gate should be cleared by the 2nd hwfn */ if (!QED_IS_CMT(p_hwfn->cdev) || !IS_LEAD_HWFN(p_hwfn)) STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0); rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode); if (rc) return rc; qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0); return 0; } static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_tunnel_info *p_tunn, int hw_mode, bool b_hw_start, enum qed_int_mode int_mode, bool allow_npar_tx_switch) { u8 rel_pf_id = p_hwfn->rel_pf_id; int rc = 0; if (p_hwfn->mcp_info) { struct qed_mcp_function_info *p_info; p_info = &p_hwfn->mcp_info->func_info; if (p_info->bandwidth_min) p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min; /* Update rate limit once we'll actually have a link */ p_hwfn->qm_info.pf_rl = 100000; } qed_cxt_hw_init_pf(p_hwfn, p_ptt); qed_int_igu_init_rt(p_hwfn); /* Set VLAN in NIG if needed */ if (hw_mode & BIT(MODE_MF_SD)) { DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n"); STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, p_hwfn->hw_info.ovlan); DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_FILTER_HDR_SEL\n"); STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET, 1); } /* Enable classification by MAC if needed */ if (hw_mode & BIT(MODE_MF_SI)) { DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring TAGMAC_CLS_TYPE\n"); STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1); } /* Protocol Configuration */ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, ((p_hwfn->hw_info.personality == QED_PCI_ISCSI) || (p_hwfn->hw_info.personality == QED_PCI_NVMETCP)) ? 1 : 0); STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0); STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); /* Sanity check before the PF init sequence that uses DMAE */ rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase"); if (rc) return rc; /* PF Init sequence */ rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); if (rc) return rc; /* QM_PF Init sequence (may be invoked separately e.g. 
for DCB) */ rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode); if (rc) return rc; qed_fw_overlay_init_ram(p_hwfn, p_ptt, p_hwfn->fw_overlay_mem); /* Pure runtime initializations - directly to the HW */ qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt); if (rc) return rc; /* Use the leading hwfn since in CMT only NIG #0 is operational */ if (IS_LEAD_HWFN(p_hwfn)) { rc = qed_llh_hw_init_pf(p_hwfn, p_ptt); if (rc) return rc; } if (b_hw_start) { /* enable interrupts */ qed_int_igu_enable(p_hwfn, p_ptt, int_mode); /* send function start command */ rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn, allow_npar_tx_switch); if (rc) { DP_NOTICE(p_hwfn, "Function start ramrod failed\n"); return rc; } if (p_hwfn->hw_info.personality == QED_PCI_FCOE) { qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, BIT(2)); qed_wr(p_hwfn, p_ptt, PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, 0x100); } } return rc; } int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_enable) { u32 delay_idx = 0, val, set_val = b_enable ? 1 : 0; /* Configure the PF's internal FID_enable for master transactions */ qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); /* Wait until value is set - try for 1 second every 50us */ for (delay_idx = 0; delay_idx < 20000; delay_idx++) { val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); if (val == set_val) break; usleep_range(50, 60); } if (val != set_val) { DP_NOTICE(p_hwfn, "PFID_ENABLE_MASTER wasn't changed after a second\n"); return -EAGAIN; } return 0; } static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn, struct qed_ptt *p_main_ptt) { /* Read shadow of current MFW mailbox */ qed_mcp_read_mb(p_hwfn, p_main_ptt); memcpy(p_hwfn->mcp_info->mfw_mb_shadow, p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length); } static void qed_fill_load_req_params(struct qed_load_req_params *p_load_req, struct qed_drv_load_params *p_drv_load) { memset(p_load_req, 0, sizeof(*p_load_req)); p_load_req->drv_role = p_drv_load->is_crash_kernel ? 
QED_DRV_ROLE_KDUMP : QED_DRV_ROLE_OS; p_load_req->timeout_val = p_drv_load->mfw_timeout_val; p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; p_load_req->override_force_load = p_drv_load->override_force_load; } static int qed_vf_start(struct qed_hwfn *p_hwfn, struct qed_hw_init_params *p_params) { if (p_params->p_tunn) { qed_vf_set_vf_start_tunn_update_param(p_params->p_tunn); qed_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn); } p_hwfn->b_int_enabled = true; return 0; } static void qed_pglueb_clear_err(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, BIT(p_hwfn->abs_pf_id)); } int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) { struct qed_load_req_params load_req_params; u32 load_code, resp, param, drv_mb_param; bool b_default_mtu = true; struct qed_hwfn *p_hwfn; const u32 *fw_overlays; u32 fw_overlays_len; u16 ether_type; int rc = 0, i; if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); return -EINVAL; } if (IS_PF(cdev)) { rc = qed_init_fw_data(cdev, p_params->bin_fw_data); if (rc) return rc; } for_each_hwfn(cdev, i) { p_hwfn = &cdev->hwfns[i]; /* If management didn't provide a default, set one of our own */ if (!p_hwfn->hw_info.mtu) { p_hwfn->hw_info.mtu = 1500; b_default_mtu = false; } if (IS_VF(cdev)) { qed_vf_start(p_hwfn, p_params); continue; } /* Some flows may keep variable set */ p_hwfn->mcp_info->mcp_handling_status = 0; rc = qed_calc_hw_mode(p_hwfn); if (rc) return rc; if (IS_PF(cdev) && (test_bit(QED_MF_8021Q_TAGGING, &cdev->mf_bits) || test_bit(QED_MF_8021AD_TAGGING, &cdev->mf_bits))) { if (test_bit(QED_MF_8021Q_TAGGING, &cdev->mf_bits)) ether_type = ETH_P_8021Q; else ether_type = ETH_P_8021AD; STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, ether_type); STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, ether_type); STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, ether_type); STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ether_type); } qed_fill_load_req_params(&load_req_params, p_params->p_drv_load_params); rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_req_params); if (rc) { DP_NOTICE(p_hwfn, "Failed sending a LOAD_REQ command\n"); return rc; } load_code = load_req_params.load_code; DP_VERBOSE(p_hwfn, QED_MSG_SP, "Load request was sent. Load code: 0x%x\n", load_code); /* Only relevant for recovery: * Clear the indication after LOAD_REQ is responded by the MFW. */ cdev->recov_in_prog = false; qed_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt); qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); /* Clean up chip from previous driver if such remains exist. * This is not needed when the PF is the first one on the * engine, since afterwards we are going to init the FW. */ if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) { rc = qed_final_cleanup(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->rel_pf_id, false); if (rc) { qed_hw_err_notify(p_hwfn, p_hwfn->p_main_ptt, QED_HW_ERR_RAMROD_FAIL, "Final cleanup failed\n"); goto load_err; } } /* Log and clear previous pglue_b errors if such exist */ qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true); /* Enable the PF's internal FID_enable in the PXP */ rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, true); if (rc) goto load_err; /* Clear the pglue_b was_error indication. * In E4 it must be done after the BME and the internal * FID_enable for the PF are set, since VDMs may cause the * indication to be set again. 
*/ qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); fw_overlays = cdev->fw_data->fw_overlays; fw_overlays_len = cdev->fw_data->fw_overlays_len; p_hwfn->fw_overlay_mem = qed_fw_overlay_mem_alloc(p_hwfn, fw_overlays, fw_overlays_len); if (!p_hwfn->fw_overlay_mem) { DP_NOTICE(p_hwfn, "Failed to allocate fw overlay memory\n"); rc = -ENOMEM; goto load_err; } switch (load_code) { case FW_MSG_CODE_DRV_LOAD_ENGINE: rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->hw_info.hw_mode); if (rc) break; fallthrough; case FW_MSG_CODE_DRV_LOAD_PORT: rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->hw_info.hw_mode); if (rc) break; fallthrough; case FW_MSG_CODE_DRV_LOAD_FUNCTION: rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, p_params->p_tunn, p_hwfn->hw_info.hw_mode, p_params->b_hw_start, p_params->int_mode, p_params->allow_npar_tx_switch); break; default: DP_NOTICE(p_hwfn, "Unexpected load code [0x%08x]", load_code); rc = -EINVAL; break; } if (rc) { DP_NOTICE(p_hwfn, "init phase failed for loadcode 0x%x (rc %d)\n", load_code, rc); goto load_err; } rc = qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); if (rc) return rc; /* send DCBX attention request command */ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "sending phony dcbx set command to trigger DCBx attention handling\n"); rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, DRV_MSG_CODE_SET_DCBX, 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, &resp, &param); if (rc) { DP_NOTICE(p_hwfn, "Failed to send DCBX attention request\n"); return rc; } p_hwfn->hw_init_done = true; } if (IS_PF(cdev)) { p_hwfn = QED_LEADING_HWFN(cdev); /* Get pre-negotiated values for stag, bandwidth etc. */ DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n"); drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET; rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, DRV_MSG_CODE_GET_OEM_UPDATES, drv_mb_param, &resp, &param); if (rc) DP_NOTICE(p_hwfn, "Failed to send GET_OEM_UPDATES attention request\n"); drv_mb_param = STORM_FW_VERSION; rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, drv_mb_param, &load_code, &param); if (rc) DP_INFO(p_hwfn, "Failed to update firmware version\n"); if (!b_default_mtu) { rc = qed_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->hw_info.mtu); if (rc) DP_INFO(p_hwfn, "Failed to update default mtu\n"); } rc = qed_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt, QED_OV_DRIVER_STATE_DISABLED); if (rc) DP_INFO(p_hwfn, "Failed to update driver state\n"); rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt, QED_OV_ESWITCH_NONE); if (rc) DP_INFO(p_hwfn, "Failed to update eswitch mode\n"); } return 0; load_err: /* The MFW load lock should be released also when initialization fails. 
*/ qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); return rc; } #define QED_HW_STOP_RETRY_LIMIT (10) static void qed_hw_timers_stop(struct qed_dev *cdev, struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { int i; /* close timers */ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); if (cdev->recov_in_prog) return; for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { if ((!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN)) && (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK))) break; /* Dependent on number of connection/tasks, possibly * 1ms sleep is required between polls */ usleep_range(1000, 2000); } if (i < QED_HW_STOP_RETRY_LIMIT) return; DP_NOTICE(p_hwfn, "Timers linear scans are not over [Connection %02x Tasks %02x]\n", (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); } void qed_hw_timers_stop_all(struct qed_dev *cdev) { int j; for_each_hwfn(cdev, j) { struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; qed_hw_timers_stop(cdev, p_hwfn, p_ptt); } } int qed_hw_stop(struct qed_dev *cdev) { struct qed_hwfn *p_hwfn; struct qed_ptt *p_ptt; int rc, rc2 = 0; int j; for_each_hwfn(cdev, j) { p_hwfn = &cdev->hwfns[j]; p_ptt = p_hwfn->p_main_ptt; DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n"); if (IS_VF(cdev)) { qed_vf_pf_int_cleanup(p_hwfn); rc = qed_vf_pf_reset(p_hwfn); if (rc) { DP_NOTICE(p_hwfn, "qed_vf_pf_reset failed. rc = %d.\n", rc); rc2 = -EINVAL; } continue; } /* mark the hw as uninitialized... */ p_hwfn->hw_init_done = false; /* Send unload command to MCP */ if (!cdev->recov_in_prog) { rc = qed_mcp_unload_req(p_hwfn, p_ptt); if (rc) { DP_NOTICE(p_hwfn, "Failed sending a UNLOAD_REQ command. rc = %d.\n", rc); rc2 = -EINVAL; } } qed_slowpath_irq_sync(p_hwfn); /* After this point no MFW attentions are expected, e.g. prevent * race between pf stop and dcbx pf update. */ rc = qed_sp_pf_stop(p_hwfn); if (rc) { DP_NOTICE(p_hwfn, "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n", rc); rc2 = -EINVAL; } qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); qed_hw_timers_stop(cdev, p_hwfn, p_ptt); /* Disable Attention Generation */ qed_int_igu_disable_int(p_hwfn, p_ptt); qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0); qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0); qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true); /* Need to wait 1ms to guarantee SBs are cleared */ usleep_range(1000, 2000); /* Disable PF in HW blocks */ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0); qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0); if (IS_LEAD_HWFN(p_hwfn) && test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits) && !QED_IS_FCOE_PERSONALITY(p_hwfn)) qed_llh_remove_mac_filter(cdev, 0, p_hwfn->hw_info.hw_mac_addr); if (!cdev->recov_in_prog) { rc = qed_mcp_unload_done(p_hwfn, p_ptt); if (rc) { DP_NOTICE(p_hwfn, "Failed sending a UNLOAD_DONE command. rc = %d.\n", rc); rc2 = -EINVAL; } } } if (IS_PF(cdev) && !cdev->recov_in_prog) { p_hwfn = QED_LEADING_HWFN(cdev); p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt; /* Clear the PF's internal FID_enable in the PXP. 
* In CMT this should only be done for first hw-function, and * only after all transactions have stopped for all active * hw-functions. */ rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false); if (rc) { DP_NOTICE(p_hwfn, "qed_pglueb_set_pfid_enable() failed. rc = %d.\n", rc); rc2 = -EINVAL; } } return rc2; } int qed_hw_stop_fastpath(struct qed_dev *cdev) { int j; for_each_hwfn(cdev, j) { struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; struct qed_ptt *p_ptt; if (IS_VF(cdev)) { qed_vf_pf_int_cleanup(p_hwfn); continue; } p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) return -EAGAIN; DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Shutting down the fastpath\n"); qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false); /* Need to wait 1ms to guarantee SBs are cleared */ usleep_range(1000, 2000); qed_ptt_release(p_hwfn, p_ptt); } return 0; } int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) { struct qed_ptt *p_ptt; if (IS_VF(p_hwfn->cdev)) return 0; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) return -EAGAIN; if (p_hwfn->p_rdma_info && p_hwfn->p_rdma_info->active && p_hwfn->b_rdma_enabled_in_prs) qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1); /* Re-open incoming traffic */ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); qed_ptt_release(p_hwfn, p_ptt); return 0; } /* Free hwfn memory and resources acquired in hw_hwfn_prepare */ static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn) { qed_ptt_pool_free(p_hwfn); kfree(p_hwfn->hw_info.p_igu_info); p_hwfn->hw_info.p_igu_info = NULL; } /* Setup bar access */ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) { /* clear indirect access */ if (QED_IS_AH(p_hwfn->cdev)) { qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0); qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0); qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0); qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0); } else { qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0); qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0); qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0); qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0); } /* Clean previous pglue_b errors if such exist */ qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); /* enable internal target-read */ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); } static void get_function_id(struct qed_hwfn *p_hwfn) { /* ME Register */ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR); p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf; p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, PXP_CONCRETE_FID_PFID); p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, PXP_CONCRETE_FID_PORT); DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "Read ME register: Concrete 0x%08x Opaque 0x%04x\n", p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid); } static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) { u32 *feat_num = p_hwfn->hw_info.feat_num; struct qed_sb_cnt_info sb_cnt; u32 non_l2_sbs = 0; memset(&sb_cnt, 0, sizeof(sb_cnt)); 
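/*
 * Illustrative sketch (not part of the qed driver): the status-block split
 * that the feature accounting below performs, rewritten as a standalone
 * helper so the arithmetic is easy to follow.  All sketch_* names, the two
 * structs and split_sbs() are hypothetical; only the min()/subtraction
 * ordering mirrors the driver logic (CNQs get at most half the SBs, then VF
 * L2 queues are carved from the IOV SBs, and the PF L2 queues take whatever
 * SBs and L2 queue resources remain).  Shown here next to the code it
 * mirrors, assuming an RDMA-capable personality.
 */
#include <stdint.h>

struct sketch_sb_budget {
	uint32_t total_sbs;	/* corresponds to sb_cnt.cnt          */
	uint32_t iov_sbs;	/* corresponds to sb_cnt.iov_cnt      */
	uint32_t cnq_ram;	/* CNQ RAM resource count (assumed)   */
	uint32_t l2_queues;	/* L2 queue resource count (assumed)  */
};

struct sketch_sb_features {
	uint32_t rdma_cnq;
	uint32_t vf_l2_que;
	uint32_t pf_l2_que;
};

static uint32_t sketch_min_u32(uint32_t a, uint32_t b)
{
	return a < b ? a : b;
}

static struct sketch_sb_features split_sbs(const struct sketch_sb_budget *in)
{
	struct sketch_sb_features out;

	/* RDMA CNQs first: at most half the SBs, bounded by CNQ RAM */
	out.rdma_cnq = sketch_min_u32(in->total_sbs / 2, in->cnq_ram);

	/* VF L2 queues: bounded by both the L2 queues and the IOV SBs */
	out.vf_l2_que = sketch_min_u32(in->l2_queues, in->iov_sbs);

	/* PF L2 queues: the SBs left after the CNQs, bounded by the
	 * L2 queues left after the VFs
	 */
	out.pf_l2_que = sketch_min_u32(in->total_sbs - out.rdma_cnq,
				       in->l2_queues - out.vf_l2_que);

	return out;
}
/* e.g. 64 SBs, 32 IOV SBs, 16 CNQ RAM, 64 L2 queues ->
 * 16 CNQs, 32 VF L2 queues, 32 PF L2 queues in this sketch.
 */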
qed_int_get_num_sbs(p_hwfn, &sb_cnt); if (IS_ENABLED(CONFIG_QED_RDMA) && QED_IS_RDMA_PERSONALITY(p_hwfn)) { /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide * the status blocks equally between L2 / RoCE but with * consideration as to how many l2 queues / cnqs we have. */ feat_num[QED_RDMA_CNQ] = min_t(u32, sb_cnt.cnt / 2, RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM)); non_l2_sbs = feat_num[QED_RDMA_CNQ]; } if (QED_IS_L2_PERSONALITY(p_hwfn)) { /* Start by allocating VF queues, then PF's */ feat_num[QED_VF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_L2_QUEUE), sb_cnt.iov_cnt); feat_num[QED_PF_L2_QUE] = min_t(u32, sb_cnt.cnt - non_l2_sbs, RESC_NUM(p_hwfn, QED_L2_QUEUE) - FEAT_NUM(p_hwfn, QED_VF_L2_QUE)); } if (QED_IS_FCOE_PERSONALITY(p_hwfn)) feat_num[QED_FCOE_CQ] = min_t(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, QED_CMDQS_CQS)); if (QED_IS_ISCSI_PERSONALITY(p_hwfn)) feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, QED_CMDQS_CQS)); if (QED_IS_NVMETCP_PERSONALITY(p_hwfn)) feat_num[QED_NVMETCP_CQ] = min_t(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, QED_CMDQS_CQS)); DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d NVMETCP_CQ=%d #SBS=%d\n", (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE), (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE), (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ), (int)FEAT_NUM(p_hwfn, QED_FCOE_CQ), (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ), (int)FEAT_NUM(p_hwfn, QED_NVMETCP_CQ), (int)sb_cnt.cnt); } const char *qed_hw_get_resc_name(enum qed_resources res_id) { switch (res_id) { case QED_L2_QUEUE: return "L2_QUEUE"; case QED_VPORT: return "VPORT"; case QED_RSS_ENG: return "RSS_ENG"; case QED_PQ: return "PQ"; case QED_RL: return "RL"; case QED_MAC: return "MAC"; case QED_VLAN: return "VLAN"; case QED_RDMA_CNQ_RAM: return "RDMA_CNQ_RAM"; case QED_ILT: return "ILT"; case QED_LL2_RAM_QUEUE: return "LL2_RAM_QUEUE"; case QED_LL2_CTX_QUEUE: return "LL2_CTX_QUEUE"; case QED_CMDQS_CQS: return "CMDQS_CQS"; case QED_RDMA_STATS_QUEUE: return "RDMA_STATS_QUEUE"; case QED_BDQ: return "BDQ"; case QED_SB: return "SB"; default: return "UNKNOWN_RESOURCE"; } } static int __qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_resources res_id, u32 resc_max_val, u32 *p_mcp_resp) { int rc; rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id, resc_max_val, p_mcp_resp); if (rc) { DP_NOTICE(p_hwfn, "MFW response failure for a max value setting of resource %d [%s]\n", res_id, qed_hw_get_resc_name(res_id)); return rc; } if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) DP_INFO(p_hwfn, "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n", res_id, qed_hw_get_resc_name(res_id), *p_mcp_resp); return 0; } static u32 qed_hsi_def_val[][MAX_CHIP_IDS] = { {MAX_NUM_VFS_BB, MAX_NUM_VFS_K2}, {MAX_NUM_L2_QUEUES_BB, MAX_NUM_L2_QUEUES_K2}, {MAX_NUM_PORTS_BB, MAX_NUM_PORTS_K2}, {MAX_SB_PER_PATH_BB, MAX_SB_PER_PATH_K2,}, {MAX_NUM_PFS_BB, MAX_NUM_PFS_K2}, {MAX_NUM_VPORTS_BB, MAX_NUM_VPORTS_K2}, {ETH_RSS_ENGINE_NUM_BB, ETH_RSS_ENGINE_NUM_K2}, {MAX_QM_TX_QUEUES_BB, MAX_QM_TX_QUEUES_K2}, {PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2}, {RDMA_NUM_STATISTIC_COUNTERS_BB, RDMA_NUM_STATISTIC_COUNTERS_K2}, {MAX_QM_GLOBAL_RLS, MAX_QM_GLOBAL_RLS}, {PBF_MAX_CMD_LINES, PBF_MAX_CMD_LINES}, {BTB_MAX_BLOCKS_BB, BTB_MAX_BLOCKS_K2}, }; u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type) { enum chip_ids chip_id = QED_IS_BB(cdev) ? 
CHIP_BB : CHIP_K2; if (type >= QED_NUM_HSI_DEFS) { DP_ERR(cdev, "Unexpected HSI definition type [%d]\n", type); return 0; } return qed_hsi_def_val[type][chip_id]; } static int qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 resc_max_val, mcp_resp; u8 res_id; int rc; for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { switch (res_id) { case QED_LL2_RAM_QUEUE: resc_max_val = MAX_NUM_LL2_RX_RAM_QUEUES; break; case QED_LL2_CTX_QUEUE: resc_max_val = MAX_NUM_LL2_RX_CTX_QUEUES; break; case QED_RDMA_CNQ_RAM: /* No need for a case for QED_CMDQS_CQS since * CNQ/CMDQS are the same resource. */ resc_max_val = NUM_OF_GLOBAL_QUEUES; break; case QED_RDMA_STATS_QUEUE: resc_max_val = NUM_OF_RDMA_STATISTIC_COUNTERS(p_hwfn->cdev); break; case QED_BDQ: resc_max_val = BDQ_NUM_RESOURCES; break; default: continue; } rc = __qed_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id, resc_max_val, &mcp_resp); if (rc) return rc; /* There's no point to continue to the next resource if the * command is not supported by the MFW. * We do continue if the command is supported but the resource * is unknown to the MFW. Such a resource will be later * configured with the default allocation values. */ if (mcp_resp == FW_MSG_CODE_UNSUPPORTED) return -EINVAL; } return 0; } static int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, enum qed_resources res_id, u32 *p_resc_num, u32 *p_resc_start) { u8 num_funcs = p_hwfn->num_funcs_on_engine; struct qed_dev *cdev = p_hwfn->cdev; switch (res_id) { case QED_L2_QUEUE: *p_resc_num = NUM_OF_L2_QUEUES(cdev) / num_funcs; break; case QED_VPORT: *p_resc_num = NUM_OF_VPORTS(cdev) / num_funcs; break; case QED_RSS_ENG: *p_resc_num = NUM_OF_RSS_ENGINES(cdev) / num_funcs; break; case QED_PQ: *p_resc_num = NUM_OF_QM_TX_QUEUES(cdev) / num_funcs; *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */ break; case QED_RL: *p_resc_num = NUM_OF_QM_GLOBAL_RLS(cdev) / num_funcs; break; case QED_MAC: case QED_VLAN: /* Each VFC resource can accommodate both a MAC and a VLAN */ *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; break; case QED_ILT: *p_resc_num = NUM_OF_PXP_ILT_RECORDS(cdev) / num_funcs; break; case QED_LL2_RAM_QUEUE: *p_resc_num = MAX_NUM_LL2_RX_RAM_QUEUES / num_funcs; break; case QED_LL2_CTX_QUEUE: *p_resc_num = MAX_NUM_LL2_RX_CTX_QUEUES / num_funcs; break; case QED_RDMA_CNQ_RAM: case QED_CMDQS_CQS: /* CNQ/CMDQS are the same resource */ *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs; break; case QED_RDMA_STATS_QUEUE: *p_resc_num = NUM_OF_RDMA_STATISTIC_COUNTERS(cdev) / num_funcs; break; case QED_BDQ: if (p_hwfn->hw_info.personality != QED_PCI_ISCSI && p_hwfn->hw_info.personality != QED_PCI_FCOE && p_hwfn->hw_info.personality != QED_PCI_NVMETCP) *p_resc_num = 0; else *p_resc_num = 1; break; case QED_SB: /* Since we want its value to reflect whether MFW supports * the new scheme, have a default of 0. 
*/ *p_resc_num = 0; break; default: return -EINVAL; } switch (res_id) { case QED_BDQ: if (!*p_resc_num) *p_resc_start = 0; else if (p_hwfn->cdev->num_ports_in_engine == 4) *p_resc_start = p_hwfn->port_id; else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI || p_hwfn->hw_info.personality == QED_PCI_NVMETCP) *p_resc_start = p_hwfn->port_id; else if (p_hwfn->hw_info.personality == QED_PCI_FCOE) *p_resc_start = p_hwfn->port_id + 2; break; default: *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx; break; } return 0; } static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn, enum qed_resources res_id) { u32 dflt_resc_num = 0, dflt_resc_start = 0; u32 mcp_resp, *p_resc_num, *p_resc_start; int rc; p_resc_num = &RESC_NUM(p_hwfn, res_id); p_resc_start = &RESC_START(p_hwfn, res_id); rc = qed_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num, &dflt_resc_start); if (rc) { DP_ERR(p_hwfn, "Failed to get default amount for resource %d [%s]\n", res_id, qed_hw_get_resc_name(res_id)); return rc; } rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id, &mcp_resp, p_resc_num, p_resc_start); if (rc) { DP_NOTICE(p_hwfn, "MFW response failure for an allocation request for resource %d [%s]\n", res_id, qed_hw_get_resc_name(res_id)); return rc; } /* Default driver values are applied in the following cases: * - The resource allocation MB command is not supported by the MFW * - There is an internal error in the MFW while processing the request * - The resource ID is unknown to the MFW */ if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) { DP_INFO(p_hwfn, "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n", res_id, qed_hw_get_resc_name(res_id), mcp_resp, dflt_resc_num, dflt_resc_start); *p_resc_num = dflt_resc_num; *p_resc_start = dflt_resc_start; goto out; } out: /* PQs have to divide by 8 [that's the HW granularity]. * Reduce number so it would fit. */ if ((res_id == QED_PQ) && ((*p_resc_num % 8) || (*p_resc_start % 8))) { DP_INFO(p_hwfn, "PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n", *p_resc_num, (*p_resc_num) & ~0x7, *p_resc_start, (*p_resc_start) & ~0x7); *p_resc_num &= ~0x7; *p_resc_start &= ~0x7; } return 0; } static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn) { int rc; u8 res_id; for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { rc = __qed_hw_set_resc_info(p_hwfn, res_id); if (rc) return rc; } return 0; } static int qed_hw_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_dev *cdev = p_hwfn->cdev; u8 native_ppfid_idx; int rc; /* Calculation of BB/AH is different for native_ppfid_idx */ if (QED_IS_BB(cdev)) native_ppfid_idx = p_hwfn->rel_pf_id; else native_ppfid_idx = p_hwfn->rel_pf_id / cdev->num_ports_in_engine; rc = qed_mcp_get_ppfid_bitmap(p_hwfn, p_ptt); if (rc != 0 && rc != -EOPNOTSUPP) return rc; else if (rc == -EOPNOTSUPP) cdev->ppfid_bitmap = 0x1 << native_ppfid_idx; if (!(cdev->ppfid_bitmap & (0x1 << native_ppfid_idx))) { DP_INFO(p_hwfn, "Fix the PPFID bitmap to include the native PPFID [native_ppfid_idx %hhd, orig_bitmap 0x%hhx]\n", native_ppfid_idx, cdev->ppfid_bitmap); cdev->ppfid_bitmap = 0x1 << native_ppfid_idx; } return 0; } static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_resc_unlock_params resc_unlock_params; struct qed_resc_lock_params resc_lock_params; bool b_ah = QED_IS_AH(p_hwfn->cdev); u8 res_id; int rc; /* Setting the max values of the soft resources and the following * resources allocation queries should be atomic. 
Since several PFs can * run in parallel - a resource lock is needed. * If either the resource lock or resource set value commands are not * supported - skip the max values setting, release the lock if * needed, and proceed to the queries. Other failures, including a * failure to acquire the lock, will cause this function to fail. */ qed_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params, QED_RESC_LOCK_RESC_ALLOC, false); rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params); if (rc && rc != -EINVAL) { return rc; } else if (rc == -EINVAL) { DP_INFO(p_hwfn, "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n"); } else if (!resc_lock_params.b_granted) { DP_NOTICE(p_hwfn, "Failed to acquire the resource lock for the resource allocation commands\n"); return -EBUSY; } else { rc = qed_hw_set_soft_resc_size(p_hwfn, p_ptt); if (rc && rc != -EINVAL) { DP_NOTICE(p_hwfn, "Failed to set the max values of the soft resources\n"); goto unlock_and_exit; } else if (rc == -EINVAL) { DP_INFO(p_hwfn, "Skip the max values setting of the soft resources since it is not supported by the MFW\n"); rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params); if (rc) DP_INFO(p_hwfn, "Failed to release the resource lock for the resource allocation commands\n"); } } rc = qed_hw_set_resc_info(p_hwfn); if (rc) goto unlock_and_exit; if (resc_lock_params.b_granted && !resc_unlock_params.b_released) { rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params); if (rc) DP_INFO(p_hwfn, "Failed to release the resource lock for the resource allocation commands\n"); } /* PPFID bitmap */ if (IS_LEAD_HWFN(p_hwfn)) { rc = qed_hw_get_ppfid_bitmap(p_hwfn, p_ptt); if (rc) return rc; } /* Sanity for ILT */ if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) || (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) { DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n", RESC_START(p_hwfn, QED_ILT), RESC_END(p_hwfn, QED_ILT) - 1); return -EINVAL; } /* This will also learn the number of SBs from MFW */ if (qed_int_igu_reset_cam(p_hwfn, p_ptt)) return -EINVAL; qed_hw_set_feat(p_hwfn); for (res_id = 0; res_id < QED_MAX_RESC; res_id++) DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n", qed_hw_get_resc_name(res_id), RESC_NUM(p_hwfn, res_id), RESC_START(p_hwfn, res_id)); return 0; unlock_and_exit: if (resc_lock_params.b_granted && !resc_unlock_params.b_released) qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params); return rc; } static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities, fld; u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; struct qed_mcp_link_speed_params *ext_speed; struct qed_mcp_link_capabilities *p_caps; struct qed_mcp_link_params *link; int i; /* Read global nvm_cfg address */ nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); /* Verify MCP has initialized it */ if (!nvm_cfg_addr) { DP_NOTICE(p_hwfn, "Shared memory not initialized\n"); return -EINVAL; } /* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */ nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); addr = MCP_REG_SCRATCH + nvm_cfg1_offset + offsetof(struct nvm_cfg1, glob) + offsetof(struct nvm_cfg1_glob, core_cfg); core_cfg = qed_rd(p_hwfn, p_ptt, addr); switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >> NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) { case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G: case 
NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G: case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G: case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F: case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E: case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G: case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G: case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G: case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G: case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G: case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1: case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1: case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2: case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2: case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4: break; default: DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg); break; } /* Read default link configuration */ link = &p_hwfn->mcp_info->link_input; p_caps = &p_hwfn->mcp_info->link_capabilities; port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr + offsetof(struct nvm_cfg1_port, speed_cap_mask)); link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; link->speed.advertised_speeds = link_temp; p_caps->speed_capabilities = link->speed.advertised_speeds; link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr + offsetof(struct nvm_cfg1_port, link_settings)); switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >> NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) { case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG: link->speed.autoneg = true; break; case NVM_CFG1_PORT_DRV_LINK_SPEED_1G: link->speed.forced_speed = 1000; break; case NVM_CFG1_PORT_DRV_LINK_SPEED_10G: link->speed.forced_speed = 10000; break; case NVM_CFG1_PORT_DRV_LINK_SPEED_20G: link->speed.forced_speed = 20000; break; case NVM_CFG1_PORT_DRV_LINK_SPEED_25G: link->speed.forced_speed = 25000; break; case NVM_CFG1_PORT_DRV_LINK_SPEED_40G: link->speed.forced_speed = 40000; break; case NVM_CFG1_PORT_DRV_LINK_SPEED_50G: link->speed.forced_speed = 50000; break; case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G: link->speed.forced_speed = 100000; break; default: DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp); } p_caps->default_speed_autoneg = link->speed.autoneg; fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_DRV_FLOW_CONTROL); link->pause.autoneg = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); link->pause.forced_rx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); link->pause.forced_tx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); link->loopback_mode = 0; if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) { switch (GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_FEC_FORCE_MODE)) { case NVM_CFG1_PORT_FEC_FORCE_MODE_NONE: p_caps->fec_default |= QED_FEC_MODE_NONE; break; case NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE: p_caps->fec_default |= QED_FEC_MODE_FIRECODE; break; case NVM_CFG1_PORT_FEC_FORCE_MODE_RS: p_caps->fec_default |= QED_FEC_MODE_RS; break; case NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO: p_caps->fec_default |= QED_FEC_MODE_AUTO; break; default: DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "unknown FEC mode in 0x%08x\n", link_temp); } } else { p_caps->fec_default = QED_FEC_MODE_UNSUPPORTED; } link->fec = p_caps->fec_default; if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr + offsetof(struct nvm_cfg1_port, ext_phy)); link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK; link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET; p_caps->default_eee = QED_MCP_EEE_ENABLED; 
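/* EEE is assumed supported and enabled at this point; the NVM power-saving
 * mode read above either disables it or selects the LPI timer in the switch
 * below.
 */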
link->eee.enable = true; switch (link_temp) { case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED: p_caps->default_eee = QED_MCP_EEE_DISABLED; link->eee.enable = false; break; case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED: p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME; break; case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE: p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_AGGRESSIVE_TIME; break; case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY: p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME; break; } link->eee.tx_lpi_timer = p_caps->eee_lpi_timer; link->eee.tx_lpi_enable = link->eee.enable; link->eee.adv_caps = QED_EEE_1G_ADV | QED_EEE_10G_ADV; } else { p_caps->default_eee = QED_MCP_EEE_UNSUPPORTED; } if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) { ext_speed = &link->ext_speed; link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr + offsetof(struct nvm_cfg1_port, extended_speed)); fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_EXTENDED_SPEED); if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN) ext_speed->autoneg = true; ext_speed->forced_speed = 0; if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G) ext_speed->forced_speed |= QED_EXT_SPEED_1G; if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G) ext_speed->forced_speed |= QED_EXT_SPEED_10G; if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G) ext_speed->forced_speed |= QED_EXT_SPEED_20G; if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G) ext_speed->forced_speed |= QED_EXT_SPEED_25G; if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G) ext_speed->forced_speed |= QED_EXT_SPEED_40G; if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R) ext_speed->forced_speed |= QED_EXT_SPEED_50G_R; if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2) ext_speed->forced_speed |= QED_EXT_SPEED_50G_R2; if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2) ext_speed->forced_speed |= QED_EXT_SPEED_100G_R2; if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4) ext_speed->forced_speed |= QED_EXT_SPEED_100G_R4; if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4) ext_speed->forced_speed |= QED_EXT_SPEED_100G_P4; fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_EXTENDED_SPEED_CAP); ext_speed->advertised_speeds = 0; if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED) ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_RES; if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G) ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_1G; if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G) ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_10G; if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G) ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_20G; if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G) ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_25G; if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G) ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_40G; if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R) ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_50G_R; if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2) ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_50G_R2; if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2) ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_100G_R2; if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4) ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_100G_R4; if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4) ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_100G_P4; link_temp 
= qed_rd(p_hwfn, p_ptt, port_cfg_addr + offsetof(struct nvm_cfg1_port, extended_fec_mode)); link->ext_fec_mode = link_temp; p_caps->default_ext_speed_caps = ext_speed->advertised_speeds; p_caps->default_ext_speed = ext_speed->forced_speed; p_caps->default_ext_autoneg = ext_speed->autoneg; p_caps->default_ext_fec = link->ext_fec_mode; DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Read default extended link config: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, FEC: 0x%02x\n", ext_speed->forced_speed, ext_speed->advertised_speeds, ext_speed->autoneg, p_caps->default_ext_fec); } DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x, EEE: 0x%02x [0x%08x usec], FEC: 0x%02x\n", link->speed.forced_speed, link->speed.advertised_speeds, link->speed.autoneg, link->pause.autoneg, p_caps->default_eee, p_caps->eee_lpi_timer, p_caps->fec_default); if (IS_LEAD_HWFN(p_hwfn)) { struct qed_dev *cdev = p_hwfn->cdev; /* Read Multi-function information from shmem */ addr = MCP_REG_SCRATCH + nvm_cfg1_offset + offsetof(struct nvm_cfg1, glob) + offsetof(struct nvm_cfg1_glob, generic_cont0); generic_cont0 = qed_rd(p_hwfn, p_ptt, addr); mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> NVM_CFG1_GLOB_MF_MODE_OFFSET; switch (mf_mode) { case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS); break; case NVM_CFG1_GLOB_MF_MODE_UFP: cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) | BIT(QED_MF_LLH_PROTO_CLSS) | BIT(QED_MF_UFP_SPECIFIC) | BIT(QED_MF_8021Q_TAGGING) | BIT(QED_MF_DONT_ADD_VLAN0_TAG); break; case NVM_CFG1_GLOB_MF_MODE_BD: cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) | BIT(QED_MF_LLH_PROTO_CLSS) | BIT(QED_MF_8021AD_TAGGING) | BIT(QED_MF_DONT_ADD_VLAN0_TAG); break; case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | BIT(QED_MF_LLH_PROTO_CLSS) | BIT(QED_MF_LL2_NON_UNICAST) | BIT(QED_MF_INTER_PF_SWITCH) | BIT(QED_MF_DISABLE_ARFS); break; case NVM_CFG1_GLOB_MF_MODE_DEFAULT: cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | BIT(QED_MF_LLH_PROTO_CLSS) | BIT(QED_MF_LL2_NON_UNICAST); if (QED_IS_BB(p_hwfn->cdev)) cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF); break; } DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", cdev->mf_bits); /* In CMT the PF is unknown when the GFS block processes the * packet. Therefore cannot use searcher as it has a per PF * database, and thus ARFS must be disabled. 
* */ if (QED_IS_CMT(cdev)) cdev->mf_bits |= BIT(QED_MF_DISABLE_ARFS); } DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", p_hwfn->cdev->mf_bits); /* Read device capabilities information from shmem */ addr = MCP_REG_SCRATCH + nvm_cfg1_offset + offsetof(struct nvm_cfg1, glob) + offsetof(struct nvm_cfg1_glob, device_capabilities); device_capabilities = qed_rd(p_hwfn, p_ptt, addr); if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) __set_bit(QED_DEV_CAP_ETH, &p_hwfn->hw_info.device_capabilities); if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE) __set_bit(QED_DEV_CAP_FCOE, &p_hwfn->hw_info.device_capabilities); if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI) __set_bit(QED_DEV_CAP_ISCSI, &p_hwfn->hw_info.device_capabilities); if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE) __set_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities); /* Read device serial number information from shmem */ addr = MCP_REG_SCRATCH + nvm_cfg1_offset + offsetof(struct nvm_cfg1, glob) + offsetof(struct nvm_cfg1_glob, serial_number); for (i = 0; i < 4; i++) p_hwfn->hw_info.part_num[i] = qed_rd(p_hwfn, p_ptt, addr + i * 4); return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); } static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id; u32 reg_function_hide, tmp, eng_mask, low_pfs_mask; struct qed_dev *cdev = p_hwfn->cdev; num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB; /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values * in the other bits are selected. * Bits 1-15 are for functions 1-15, respectively, and their value is * '0' only for enabled functions (function 0 always exists and * enabled). * In case of CMT, only the "even" functions are enabled, and thus the * number of functions for both hwfns is learnt from the same bits. 
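 * For example (illustrative value): on an AH device (eng_mask 0xfffe),
 * reg_function_hide = 0xfff9 leaves bits 1 and 2 clear, so PFs 1 and 2 are
 * counted in addition to PF 0 and num_funcs ends up as 3.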
*/ reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE); if (reg_function_hide & 0x1) { if (QED_IS_BB(cdev)) { if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) { num_funcs = 0; eng_mask = 0xaaaa; } else { num_funcs = 1; eng_mask = 0x5554; } } else { num_funcs = 1; eng_mask = 0xfffe; } /* Get the number of the enabled functions on the engine */ tmp = (reg_function_hide ^ 0xffffffff) & eng_mask; while (tmp) { if (tmp & 0x1) num_funcs++; tmp >>= 0x1; } /* Get the PF index within the enabled functions */ low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1; tmp = reg_function_hide & eng_mask & low_pfs_mask; while (tmp) { if (tmp & 0x1) enabled_func_idx--; tmp >>= 0x1; } } p_hwfn->num_funcs_on_engine = num_funcs; p_hwfn->enabled_func_idx = enabled_func_idx; DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n", p_hwfn->rel_pf_id, p_hwfn->abs_pf_id, p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); } static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 addr, global_offsize, global_addr, port_mode; struct qed_dev *cdev = p_hwfn->cdev; /* In CMT there is always only one port */ if (cdev->num_hwfns > 1) { cdev->num_ports_in_engine = 1; cdev->num_ports = 1; return; } /* Determine the number of ports per engine */ port_mode = qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE); switch (port_mode) { case 0x0: cdev->num_ports_in_engine = 1; break; case 0x1: cdev->num_ports_in_engine = 2; break; case 0x2: cdev->num_ports_in_engine = 4; break; default: DP_NOTICE(p_hwfn, "Unknown port mode 0x%08x\n", port_mode); cdev->num_ports_in_engine = 1; /* Default to something */ break; } /* Get the total number of ports of the device */ addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, PUBLIC_GLOBAL); global_offsize = qed_rd(p_hwfn, p_ptt, addr); global_addr = SECTION_ADDR(global_offsize, 0); addr = global_addr + offsetof(struct public_global, max_ports); cdev->num_ports = (u8)qed_rd(p_hwfn, p_ptt, addr); } static void qed_get_eee_caps(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_link_capabilities *p_caps; u32 eee_status; p_caps = &p_hwfn->mcp_info->link_capabilities; if (p_caps->default_eee == QED_MCP_EEE_UNSUPPORTED) return; p_caps->eee_speed_caps = 0; eee_status = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + offsetof(struct public_port, eee_status)); eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >> EEE_SUPPORTED_SPEED_OFFSET; if (eee_status & EEE_1G_SUPPORTED) p_caps->eee_speed_caps |= QED_EEE_1G_ADV; if (eee_status & EEE_10G_ADV) p_caps->eee_speed_caps |= QED_EEE_10G_ADV; } static int qed_get_hw_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_pci_personality personality) { int rc; /* Since all information is common, only first hwfns should do this */ if (IS_LEAD_HWFN(p_hwfn)) { rc = qed_iov_hw_info(p_hwfn); if (rc) return rc; } if (IS_LEAD_HWFN(p_hwfn)) qed_hw_info_port_num(p_hwfn, p_ptt); qed_mcp_get_capabilities(p_hwfn, p_ptt); qed_hw_get_nvm_info(p_hwfn, p_ptt); rc = qed_int_igu_read_cam(p_hwfn, p_ptt); if (rc) return rc; if (qed_mcp_is_init(p_hwfn)) ether_addr_copy(p_hwfn->hw_info.hw_mac_addr, p_hwfn->mcp_info->func_info.mac); else eth_random_addr(p_hwfn->hw_info.hw_mac_addr); if (qed_mcp_is_init(p_hwfn)) { if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET) p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan; qed_mcp_cmd_port_init(p_hwfn, p_ptt); qed_get_eee_caps(p_hwfn, p_ptt); qed_mcp_read_ufp_config(p_hwfn, p_ptt); } if 
(qed_mcp_is_init(p_hwfn)) { enum qed_pci_personality protocol; protocol = p_hwfn->mcp_info->func_info.protocol; p_hwfn->hw_info.personality = protocol; } if (QED_IS_ROCE_PERSONALITY(p_hwfn)) p_hwfn->hw_info.multi_tc_roce_en = true; p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; p_hwfn->hw_info.num_active_tc = 1; qed_get_num_funcs(p_hwfn, p_ptt); if (qed_mcp_is_init(p_hwfn)) p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu; return qed_hw_get_resc(p_hwfn, p_ptt); } static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_dev *cdev = p_hwfn->cdev; u16 device_id_mask; u32 tmp; /* Read Vendor Id / Device Id */ pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id); pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id); /* Determine type */ device_id_mask = cdev->device_id & QED_DEV_ID_MASK; switch (device_id_mask) { case QED_DEV_ID_MASK_BB: cdev->type = QED_DEV_TYPE_BB; break; case QED_DEV_ID_MASK_AH: cdev->type = QED_DEV_TYPE_AH; break; default: DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id); return -EBUSY; } cdev->chip_num = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM); cdev->chip_rev = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV); MASK_FIELD(CHIP_REV, cdev->chip_rev); /* Learn number of HW-functions */ tmp = qed_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR); if (tmp & (1 << p_hwfn->rel_pf_id)) { DP_NOTICE(cdev->hwfns, "device in CMT mode\n"); cdev->num_hwfns = 2; } else { cdev->num_hwfns = 1; } cdev->chip_bond_id = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_TEST_REG) >> 4; MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id); cdev->chip_metal = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL); MASK_FIELD(CHIP_METAL, cdev->chip_metal); DP_INFO(cdev->hwfns, "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", QED_IS_BB(cdev) ? "BB" : "AH", 'A' + cdev->chip_rev, (int)cdev->chip_metal, cdev->chip_num, cdev->chip_rev, cdev->chip_bond_id, cdev->chip_metal); return 0; } static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, void __iomem *p_regview, void __iomem *p_doorbells, u64 db_phys_addr, enum qed_pci_personality personality) { struct qed_dev *cdev = p_hwfn->cdev; int rc = 0; /* Split PCI bars evenly between hwfns */ p_hwfn->regview = p_regview; p_hwfn->doorbells = p_doorbells; p_hwfn->db_phys_addr = db_phys_addr; if (IS_VF(p_hwfn->cdev)) return qed_vf_hw_prepare(p_hwfn); /* Validate that chip access is feasible */ if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { DP_ERR(p_hwfn, "Reading the ME register returns all Fs; Preventing further chip access\n"); return -EINVAL; } get_function_id(p_hwfn); /* Allocate PTT pool */ rc = qed_ptt_pool_alloc(p_hwfn); if (rc) goto err0; /* Allocate the main PTT */ p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); /* First hwfn learns basic information, e.g., number of hwfns */ if (!p_hwfn->my_id) { rc = qed_get_dev_info(p_hwfn, p_hwfn->p_main_ptt); if (rc) goto err1; } qed_hw_hwfn_prepare(p_hwfn); /* Initialize MCP structure */ rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); if (rc) { DP_NOTICE(p_hwfn, "Failed initializing mcp command\n"); goto err1; } /* Read the device configuration information from the HW and SHMEM */ rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality); if (rc) { DP_NOTICE(p_hwfn, "Failed to get HW information\n"); goto err2; } /* Sending a mailbox to the MFW should be done after qed_get_hw_info() * is called as it sets the ports number in an engine. 
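 * (The PF FLR request just below, for instance, is such a mailbox command.)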
*/ if (IS_LEAD_HWFN(p_hwfn) && !cdev->recov_in_prog) { rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt); if (rc) DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n"); } /* NVRAM info initialization and population */ if (IS_LEAD_HWFN(p_hwfn)) { rc = qed_mcp_nvm_info_populate(p_hwfn); if (rc) { DP_NOTICE(p_hwfn, "Failed to populate nvm info shadow\n"); goto err2; } } /* Allocate the init RT array and initialize the init-ops engine */ rc = qed_init_alloc(p_hwfn); if (rc) goto err3; return rc; err3: if (IS_LEAD_HWFN(p_hwfn)) qed_mcp_nvm_info_free(p_hwfn); err2: if (IS_LEAD_HWFN(p_hwfn)) qed_iov_free_hw_info(p_hwfn->cdev); qed_mcp_free(p_hwfn); err1: qed_hw_hwfn_free(p_hwfn); err0: return rc; } int qed_hw_prepare(struct qed_dev *cdev, int personality) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); int rc; /* Store the precompiled init data ptrs */ if (IS_PF(cdev)) qed_init_iro_array(cdev); /* Initialize the first hwfn - will learn number of hwfns */ rc = qed_hw_prepare_single(p_hwfn, cdev->regview, cdev->doorbells, cdev->db_phys_addr, personality); if (rc) return rc; personality = p_hwfn->hw_info.personality; /* Initialize the rest of the hwfns */ if (cdev->num_hwfns > 1) { void __iomem *p_regview, *p_doorbell; u64 db_phys_addr; u32 offset; /* adjust bar offset for second engine */ offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt, BAR_ID_0) / 2; p_regview = cdev->regview + offset; offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt, BAR_ID_1) / 2; p_doorbell = cdev->doorbells + offset; db_phys_addr = cdev->db_phys_addr + offset; /* prepare second hw function */ rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview, p_doorbell, db_phys_addr, personality); /* in case of error, need to free the previously * initiliazed hwfn 0. */ if (rc) { if (IS_PF(cdev)) { qed_init_free(p_hwfn); qed_mcp_nvm_info_free(p_hwfn); qed_mcp_free(p_hwfn); qed_hw_hwfn_free(p_hwfn); } } } return rc; } void qed_hw_remove(struct qed_dev *cdev) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); int i; if (IS_PF(cdev)) qed_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt, QED_OV_DRIVER_STATE_NOT_LOADED); for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; if (IS_VF(cdev)) { qed_vf_pf_release(p_hwfn); continue; } qed_init_free(p_hwfn); qed_hw_hwfn_free(p_hwfn); qed_mcp_free(p_hwfn); } qed_iov_free_hw_info(cdev); qed_mcp_nvm_info_free(p_hwfn); } int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id) { if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) { u16 min, max; min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE); DP_NOTICE(p_hwfn, "l2_queue id [%d] is not valid, available indices [%d - %d]\n", src_id, min, max); return -EINVAL; } *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id; return 0; } int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id) { if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) { u8 min, max; min = (u8)RESC_START(p_hwfn, QED_VPORT); max = min + RESC_NUM(p_hwfn, QED_VPORT); DP_NOTICE(p_hwfn, "vport id [%d] is not valid, available indices [%d - %d]\n", src_id, min, max); return -EINVAL; } *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id; return 0; } int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id) { if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) { u8 min, max; min = (u8)RESC_START(p_hwfn, QED_RSS_ENG); max = min + RESC_NUM(p_hwfn, QED_RSS_ENG); DP_NOTICE(p_hwfn, "rss_eng id [%d] is not valid, available indices [%d - %d]\n", src_id, min, max); return -EINVAL; } *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + 
src_id; return 0; } static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 hw_addr, void *p_eth_qzone, size_t eth_qzone_size, u8 timeset) { struct coalescing_timeset *p_coal_timeset; if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) { DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n"); return -EINVAL; } p_coal_timeset = p_eth_qzone; memset(p_eth_qzone, 0, eth_qzone_size); SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset); SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1); qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size); return 0; } int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle) { struct qed_queue_cid *p_cid = p_handle; struct qed_hwfn *p_hwfn; struct qed_ptt *p_ptt; int rc = 0; p_hwfn = p_cid->p_owner; if (IS_VF(p_hwfn->cdev)) return qed_vf_pf_set_coalesce(p_hwfn, rx_coal, tx_coal, p_cid); p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) return -EAGAIN; if (rx_coal) { rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); if (rc) goto out; p_hwfn->cdev->rx_coalesce_usecs = rx_coal; } if (tx_coal) { rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid); if (rc) goto out; p_hwfn->cdev->tx_coalesce_usecs = tx_coal; } out: qed_ptt_release(p_hwfn, p_ptt); return rc; } int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 coalesce, struct qed_queue_cid *p_cid) { struct ustorm_eth_queue_zone eth_qzone; u8 timeset, timer_res; u32 address; int rc; /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ if (coalesce <= 0x7F) { timer_res = 0; } else if (coalesce <= 0xFF) { timer_res = 1; } else if (coalesce <= 0x1FF) { timer_res = 2; } else { DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); return -EINVAL; } timeset = (u8)(coalesce >> timer_res); rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, p_cid->sb_igu_id, false); if (rc) goto out; address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id); rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone, sizeof(struct ustorm_eth_queue_zone), timeset); if (rc) goto out; out: return rc; } int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 coalesce, struct qed_queue_cid *p_cid) { struct xstorm_eth_queue_zone eth_qzone; u8 timeset, timer_res; u32 address; int rc; /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ if (coalesce <= 0x7F) { timer_res = 0; } else if (coalesce <= 0xFF) { timer_res = 1; } else if (coalesce <= 0x1FF) { timer_res = 2; } else { DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce); return -EINVAL; } timeset = (u8)(coalesce >> timer_res); rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, p_cid->sb_igu_id, true); if (rc) goto out; address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id); rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone, sizeof(struct xstorm_eth_queue_zone), timeset); out: return rc; } /* Calculate final WFQ values for all vports and configure them. 
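 * Each vport weight is computed as wfq = (min_speed * QED_WFQ_UNIT) / min_pf_rate,
 * QED_WFQ_UNIT being the 100% unit.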
* After this configuration each vport will have * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT) */ static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 min_pf_rate) { struct init_qm_vport_params *vport_params; int i; vport_params = p_hwfn->qm_info.qm_vport_params; for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed; vport_params[i].wfq = (wfq_speed * QED_WFQ_UNIT) / min_pf_rate; qed_init_vport_wfq(p_hwfn, p_ptt, vport_params[i].first_tx_pq_id, vport_params[i].wfq); } } static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn, u32 min_pf_rate) { int i; for (i = 0; i < p_hwfn->qm_info.num_vports; i++) p_hwfn->qm_info.qm_vport_params[i].wfq = 1; } static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 min_pf_rate) { struct init_qm_vport_params *vport_params; int i; vport_params = p_hwfn->qm_info.qm_vport_params; for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { qed_init_wfq_default_param(p_hwfn, min_pf_rate); qed_init_vport_wfq(p_hwfn, p_ptt, vport_params[i].first_tx_pq_id, vport_params[i].wfq); } } /* This function performs several validations for WFQ * configuration and required min rate for a given vport * 1. req_rate must be greater than one percent of min_pf_rate. * 2. req_rate should not cause other vports [not configured for WFQ explicitly] * rates to get less than one percent of min_pf_rate. * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate. */ static int qed_init_wfq_param(struct qed_hwfn *p_hwfn, u16 vport_id, u32 req_rate, u32 min_pf_rate) { u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0; int non_requested_count = 0, req_count = 0, i, num_vports; num_vports = p_hwfn->qm_info.num_vports; if (num_vports < 2) { DP_NOTICE(p_hwfn, "Unexpected num_vports: %d\n", num_vports); return -EINVAL; } /* Accounting for the vports which are configured for WFQ explicitly */ for (i = 0; i < num_vports; i++) { u32 tmp_speed; if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) { req_count++; tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed; total_req_min_rate += tmp_speed; } } /* Include current vport data as well */ req_count++; total_req_min_rate += req_rate; non_requested_count = num_vports - req_count; if (req_rate < min_pf_rate / QED_WFQ_UNIT) { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", vport_id, req_rate, min_pf_rate); return -EINVAL; } if (num_vports > QED_WFQ_UNIT) { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Number of vports is greater than %d\n", QED_WFQ_UNIT); return -EINVAL; } if (total_req_min_rate > min_pf_rate) { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n", total_req_min_rate, min_pf_rate); return -EINVAL; } total_left_rate = min_pf_rate - total_req_min_rate; left_rate_per_vp = total_left_rate / non_requested_count; if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", left_rate_per_vp, min_pf_rate); return -EINVAL; } p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate; p_hwfn->qm_info.wfq_data[vport_id].configured = true; for (i = 0; i < num_vports; i++) { if (p_hwfn->qm_info.wfq_data[i].configured) continue; p_hwfn->qm_info.wfq_data[i].min_speed = 
left_rate_per_vp; } return 0; } static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 vp_id, u32 rate) { struct qed_mcp_link_state *p_link; int rc = 0; p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output; if (!p_link->min_pf_rate) { p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate; p_hwfn->qm_info.wfq_data[vp_id].configured = true; return rc; } rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate); if (!rc) qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, p_link->min_pf_rate); else DP_NOTICE(p_hwfn, "Validation failed while configuring min rate\n"); return rc; } static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 min_pf_rate) { bool use_wfq = false; int rc = 0; u16 i; /* Validate all pre configured vports for wfq */ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { u32 rate; if (!p_hwfn->qm_info.wfq_data[i].configured) continue; rate = p_hwfn->qm_info.wfq_data[i].min_speed; use_wfq = true; rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate); if (rc) { DP_NOTICE(p_hwfn, "WFQ validation failed while configuring min rate\n"); break; } } if (!rc && use_wfq) qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); else qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); return rc; } /* Main API for qed clients to configure vport min rate. * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)] * rate - Speed in Mbps needs to be assigned to a given vport. */ int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate) { int i, rc = -EINVAL; /* Currently not supported; Might change in future */ if (cdev->num_hwfns > 1) { DP_NOTICE(cdev, "WFQ configuration is not supported for this device\n"); return rc; } for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_ptt *p_ptt; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) return -EBUSY; rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate); if (rc) { qed_ptt_release(p_hwfn, p_ptt); return rc; } qed_ptt_release(p_hwfn, p_ptt); } return rc; } /* API to configure WFQ from mcp link change */ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, struct qed_ptt *p_ptt, u32 min_pf_rate) { int i; if (cdev->num_hwfns > 1) { DP_VERBOSE(cdev, NETIF_MSG_LINK, "WFQ configuration is not supported for this device\n"); return; } for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; __qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt, min_pf_rate); } } int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_mcp_link_state *p_link, u8 max_bw) { int rc = 0; p_hwfn->mcp_info->func_info.bandwidth_max = max_bw; if (!p_link->line_speed && (max_bw != 100)) return rc; p_link->speed = (p_link->line_speed * max_bw) / 100; p_hwfn->qm_info.pf_rl = p_link->speed; /* Since the limiter also affects Tx-switched traffic, we don't want it * to limit such traffic in case there's no actual limit. * In that case, set limit to imaginary high boundary. 
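 * (100000 is used as that boundary just below, i.e. roughly 100 Gb/s given
 * the Mbps units of pf_rl.)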
*/ if (max_bw == 100) p_hwfn->qm_info.pf_rl = 100000; rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, p_hwfn->qm_info.pf_rl); DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Configured MAX bandwidth to be %08x Mb/sec\n", p_link->speed); return rc; } /* Main API to configure PF max bandwidth where bw range is [1 - 100] */ int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw) { int i, rc = -EINVAL; if (max_bw < 1 || max_bw > 100) { DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n"); return rc; } for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev); struct qed_mcp_link_state *p_link; struct qed_ptt *p_ptt; p_link = &p_lead->mcp_info->link_output; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) return -EBUSY; rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw); qed_ptt_release(p_hwfn, p_ptt); if (rc) break; } return rc; } int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_mcp_link_state *p_link, u8 min_bw) { int rc = 0; p_hwfn->mcp_info->func_info.bandwidth_min = min_bw; p_hwfn->qm_info.pf_wfq = min_bw; if (!p_link->line_speed) return rc; p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100; rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw); DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Configured MIN bandwidth to be %d Mb/sec\n", p_link->min_pf_rate); return rc; } /* Main API to configure PF min bandwidth where bw range is [1-100] */ int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw) { int i, rc = -EINVAL; if (min_bw < 1 || min_bw > 100) { DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n"); return rc; } for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev); struct qed_mcp_link_state *p_link; struct qed_ptt *p_ptt; p_link = &p_lead->mcp_info->link_output; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) return -EBUSY; rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw); if (rc) { qed_ptt_release(p_hwfn, p_ptt); return rc; } if (p_link->min_pf_rate) { u32 min_rate = p_link->min_pf_rate; rc = __qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt, min_rate); } qed_ptt_release(p_hwfn, p_ptt); } return rc; } void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_link_state *p_link; p_link = &p_hwfn->mcp_info->link_output; if (p_link->min_pf_rate) qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, p_link->min_pf_rate); memset(p_hwfn->qm_info.wfq_data, 0, sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports); } int qed_device_num_ports(struct qed_dev *cdev) { return cdev->num_ports; } void qed_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb, u8 *mac) { ((u8 *)fw_msb)[0] = mac[1]; ((u8 *)fw_msb)[1] = mac[0]; ((u8 *)fw_mid)[0] = mac[3]; ((u8 *)fw_mid)[1] = mac[2]; ((u8 *)fw_lsb)[0] = mac[5]; ((u8 *)fw_lsb)[1] = mac[4]; } static int qed_llh_shadow_remove_all_filters(struct qed_dev *cdev, u8 ppfid) { struct qed_llh_info *p_llh_info = cdev->p_llh_info; struct qed_llh_filter_info *p_filters; int rc; rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "remove_all"); if (rc) return rc; p_filters = p_llh_info->pp_filters[ppfid]; memset(p_filters, 0, NIG_REG_LLH_FUNC_FILTER_EN_SIZE * sizeof(*p_filters)); return 0; } static void qed_llh_clear_ppfid_filters(struct qed_dev *cdev, u8 ppfid) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); u8 filter_idx, abs_ppfid; int rc = 0; if (!p_ptt) 
return; if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits) && !test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits)) goto out; rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid); if (rc) goto out; rc = qed_llh_shadow_remove_all_filters(cdev, ppfid); if (rc) goto out; for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; filter_idx++) { rc = qed_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx); if (rc) goto out; } out: qed_ptt_release(p_hwfn, p_ptt); } int qed_llh_add_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port) { return qed_llh_add_protocol_filter(cdev, 0, QED_LLH_FILTER_TCP_SRC_PORT, src_port, QED_LLH_DONT_CARE); } void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port) { qed_llh_remove_protocol_filter(cdev, 0, QED_LLH_FILTER_TCP_SRC_PORT, src_port, QED_LLH_DONT_CARE); } int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port) { return qed_llh_add_protocol_filter(cdev, 0, QED_LLH_FILTER_TCP_DEST_PORT, QED_LLH_DONT_CARE, dest_port); } void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port) { qed_llh_remove_protocol_filter(cdev, 0, QED_LLH_FILTER_TCP_DEST_PORT, QED_LLH_DONT_CARE, dest_port); } void qed_llh_clear_all_filters(struct qed_dev *cdev) { u8 ppfid; if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits) && !test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits)) return; for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) qed_llh_clear_ppfid_filters(cdev, ppfid); }
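/* Usage sketch (illustrative only, not part of the driver): a qed client
 * would typically bound PF bandwidth and guarantee a vport minimum rate
 * through qed_configure_pf_max_bandwidth() and qed_configure_vport_wfq()
 * defined earlier in this file, e.g.:
 *
 *	rc = qed_configure_pf_max_bandwidth(cdev, 50);
 *	if (!rc)
 *		rc = qed_configure_vport_wfq(cdev, 0, 1000);
 *
 * Percentages are in [1-100] and rates in Mbps; both helpers acquire a PTT
 * per hwfn and return -EBUSY if none is available, and vport WFQ is
 * rejected on CMT (num_hwfns > 1) devices.
 */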
linux-master
drivers/net/ethernet/qlogic/qed/qed_dev.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/types.h> #include <asm/byteorder.h> #include <linux/io.h> #include <linux/bitops.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/string.h> #include "qed.h" #include "qed_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_int.h" #include "qed_mcp.h" #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_sriov.h" #include "qed_vf.h" struct qed_pi_info { qed_int_comp_cb_t comp_cb; void *cookie; }; struct qed_sb_sp_info { struct qed_sb_info sb_info; /* per protocol index data */ struct qed_pi_info pi_info_arr[PIS_PER_SB]; }; enum qed_attention_type { QED_ATTN_TYPE_ATTN, QED_ATTN_TYPE_PARITY, }; #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \ ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn) struct aeu_invert_reg_bit { char bit_name[30]; #define ATTENTION_PARITY (1 << 0) #define ATTENTION_LENGTH_MASK (0x00000ff0) #define ATTENTION_LENGTH_SHIFT (4) #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \ ATTENTION_LENGTH_SHIFT) #define ATTENTION_SINGLE BIT(ATTENTION_LENGTH_SHIFT) #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY) #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \ ATTENTION_PARITY) /* Multiple bits start with this offset */ #define ATTENTION_OFFSET_MASK (0x000ff000) #define ATTENTION_OFFSET_SHIFT (12) #define ATTENTION_BB_MASK (0x00700000) #define ATTENTION_BB_SHIFT (20) #define ATTENTION_BB(value) (value << ATTENTION_BB_SHIFT) #define ATTENTION_BB_DIFFERENT BIT(23) #define ATTENTION_CLEAR_ENABLE BIT(28) unsigned int flags; /* Callback to call if attention will be triggered */ int (*cb)(struct qed_hwfn *p_hwfn); enum block_id block_index; }; struct aeu_invert_reg { struct aeu_invert_reg_bit bits[32]; }; #define MAX_ATTN_GRPS (8) #define NUM_ATTN_REGS (9) /* Specific HW attention callbacks */ static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn) { u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE); /* This might occur on certain instances; Log it once then mask it */ DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp); qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff); return 0; } #define QED_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1) #define ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1) #define ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0) #define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0xf) #define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1) #define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x1) #define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5) #define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0xff) #define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6) #define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0xf) #define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14) #define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0xff) #define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18) static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn) { u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, PSWHST_REG_INCORRECT_ACCESS_VALID); if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) { u32 addr, data, length; addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, PSWHST_REG_INCORRECT_ACCESS_ADDRESS); data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, PSWHST_REG_INCORRECT_ACCESS_DATA); length = qed_rd(p_hwfn, 
p_hwfn->p_dpc_ptt, PSWHST_REG_INCORRECT_ACCESS_LENGTH); DP_INFO(p_hwfn->cdev, "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n", addr, length, (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID), (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID), (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_VALID), (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_CLIENT), (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR), (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_BYTE_EN), data); } return 0; } #define QED_GRC_ATTENTION_VALID_BIT (1 << 0) #define QED_GRC_ATTENTION_ADDRESS_MASK (0x7fffff) #define QED_GRC_ATTENTION_ADDRESS_SHIFT (0) #define QED_GRC_ATTENTION_RDWR_BIT (1 << 23) #define QED_GRC_ATTENTION_MASTER_MASK (0xf) #define QED_GRC_ATTENTION_MASTER_SHIFT (24) #define QED_GRC_ATTENTION_PF_MASK (0xf) #define QED_GRC_ATTENTION_PF_SHIFT (0) #define QED_GRC_ATTENTION_VF_MASK (0xff) #define QED_GRC_ATTENTION_VF_SHIFT (4) #define QED_GRC_ATTENTION_PRIV_MASK (0x3) #define QED_GRC_ATTENTION_PRIV_SHIFT (14) #define QED_GRC_ATTENTION_PRIV_VF (0) static const char *attn_master_to_str(u8 master) { switch (master) { case 1: return "PXP"; case 2: return "MCP"; case 3: return "MSDM"; case 4: return "PSDM"; case 5: return "YSDM"; case 6: return "USDM"; case 7: return "TSDM"; case 8: return "XSDM"; case 9: return "DBU"; case 10: return "DMAE"; default: return "Unknown"; } } static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn) { u32 tmp, tmp2; /* We've already cleared the timeout interrupt register, so we learn * of interrupts via the validity register */ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, GRC_REG_TIMEOUT_ATTN_ACCESS_VALID); if (!(tmp & QED_GRC_ATTENTION_VALID_BIT)) goto out; /* Read the GRC timeout information */ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0); tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1); DP_INFO(p_hwfn->cdev, "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n", tmp2, tmp, (tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from", GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2, attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)), GET_FIELD(tmp2, QED_GRC_ATTENTION_PF), (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) == QED_GRC_ATTENTION_PRIV_VF) ? 
"VF" : "(Irrelevant)", GET_FIELD(tmp2, QED_GRC_ATTENTION_VF)); out: /* Regardles of anything else, clean the validity bit */ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); return 0; } #define PGLUE_ATTENTION_VALID (1 << 29) #define PGLUE_ATTENTION_RD_VALID (1 << 26) #define PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf) #define PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) #define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK (0x1) #define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT (19) #define PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff) #define PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK (0x1) #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT (21) #define PGLUE_ATTENTION_DETAILS2_BME_MASK (0x1) #define PGLUE_ATTENTION_DETAILS2_BME_SHIFT (22) #define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK (0x1) #define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT (23) #define PGLUE_ATTENTION_ICPL_VALID (1 << 23) #define PGLUE_ATTENTION_ZLR_VALID (1 << 25) #define PGLUE_ATTENTION_ILT_VALID (1 << 23) int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool hw_init) { char msg[256]; u32 tmp; tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); if (tmp & PGLUE_ATTENTION_VALID) { u32 addr_lo, addr_hi, details; addr_lo = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_ADD_31_0); addr_hi = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_ADD_63_32); details = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS); snprintf(msg, sizeof(msg), "Illegal write by chip to [%08x:%08x] blocked.\n" "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]", addr_hi, addr_lo, details, (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), !!GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VF_VALID), tmp, !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR), !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME), !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN)); if (hw_init) DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg); else DP_NOTICE(p_hwfn, "%s\n", msg); } tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); if (tmp & PGLUE_ATTENTION_RD_VALID) { u32 addr_lo, addr_hi, details; addr_lo = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_ADD_31_0); addr_hi = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_ADD_63_32); details = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS); DP_NOTICE(p_hwfn, "Illegal read by chip from [%08x:%08x] blocked.\n" "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", addr_hi, addr_lo, details, (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, tmp, GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN) ? 
1 : 0); } tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); if (tmp & PGLUE_ATTENTION_ICPL_VALID) { snprintf(msg, sizeof(msg), "ICPL error - %08x", tmp); if (hw_init) DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg); else DP_NOTICE(p_hwfn, "%s\n", msg); } tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); if (tmp & PGLUE_ATTENTION_ZLR_VALID) { u32 addr_hi, addr_lo; addr_lo = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); addr_hi = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); DP_NOTICE(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n", tmp, addr_hi, addr_lo); } tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2); if (tmp & PGLUE_ATTENTION_ILT_VALID) { u32 addr_hi, addr_lo, details; addr_lo = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); addr_hi = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); details = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS); DP_NOTICE(p_hwfn, "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", details, tmp, addr_hi, addr_lo); } /* Clear the indications */ qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, BIT(2)); return 0; } static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn) { return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false); } static int qed_fw_assertion(struct qed_hwfn *p_hwfn) { qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_FW_ASSERT, "FW assertion!\n"); /* Clear assert indications */ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MISC_REG_AEU_GENERAL_ATTN_32, 0); return -EINVAL; } static int qed_general_attention_35(struct qed_hwfn *p_hwfn) { DP_INFO(p_hwfn, "General attention 35!\n"); return 0; } #define QED_DORQ_ATTENTION_REASON_MASK (0xfffff) #define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff) #define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0) #define QED_DORQ_ATTENTION_SIZE_MASK (0x7f) #define QED_DORQ_ATTENTION_SIZE_SHIFT (16) #define QED_DB_REC_COUNT 1000 #define QED_DB_REC_INTERVAL 100 static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 count = QED_DB_REC_COUNT; u32 usage = 1; /* Flush any pending (e)dpms as they may never arrive */ qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); /* wait for usage to zero or count to run out. This is necessary since * EDPM doorbell transactions can take multiple 64b cycles, and as such * can "split" over the pci. Possibly, the doorbell drop can happen with * half an EDPM in the queue and other half dropped. Another EDPM * doorbell to the same address (from doorbell recovery mechanism or * from the doorbelling entity) could have first half dropped and second * half interpreted as continuation of the first. To prevent such * malformed doorbells from reaching the device, flush the queue before * releasing the overflow sticky indication. */ while (count-- && usage) { usage = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT); udelay(QED_DB_REC_INTERVAL); } /* should have been depleted by now */ if (usage) { DP_NOTICE(p_hwfn->cdev, "DB recovery: doorbell usage failed to zero after %d usec. 
usage was %x\n", QED_DB_REC_INTERVAL * QED_DB_REC_COUNT, usage); return -EBUSY; } return 0; } int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 attn_ovfl, cur_ovfl; int rc; attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow); cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); if (!cur_ovfl && !attn_ovfl) return 0; DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n", attn_ovfl, cur_ovfl); if (cur_ovfl && !p_hwfn->db_bar_no_edpm) { rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); if (rc) return rc; } /* Release overflow sticky indication (stop silently dropping everything) */ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); /* Repeat all last doorbells (doorbell drop recovery) */ qed_db_recovery_execute(p_hwfn); return 0; } static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn) { struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; u32 overflow; int rc; overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); if (!overflow) goto out; /* Run PF doorbell recovery in next periodic handler */ set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow); if (!p_hwfn->db_bar_no_edpm) { rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); if (rc) goto out; } qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); out: /* Schedule the handler even if overflow was not detected */ qed_periodic_db_rec_start(p_hwfn); } static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn) { u32 int_sts, first_drop_reason, details, address, all_drops_reason; struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); if (int_sts == 0xdeadbeaf) { DP_NOTICE(p_hwfn->cdev, "DORQ is being reset, skipping int_sts handler\n"); return 0; } /* int_sts may be zero since all PFs were interrupted for doorbell * overflow but another one already handled it. Can abort here. If * This PF also requires overflow recovery we will be interrupted again. * The masked almost full indication may also be set. Ignoring. */ if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) return 0; DP_NOTICE(p_hwfn->cdev, "DORQ attention. 
int_sts was %x\n", int_sts); /* check if db_drop or overflow happened */ if (int_sts & (DORQ_REG_INT_STS_DB_DROP | DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { /* Obtain data about db drop/overflow */ first_drop_reason = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_REASON) & QED_DORQ_ATTENTION_REASON_MASK; details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS); address = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_ADDRESS); all_drops_reason = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REASON); /* Log info */ DP_NOTICE(p_hwfn->cdev, "Doorbell drop occurred\n" "Address\t\t0x%08x\t(second BAR address)\n" "FID\t\t0x%04x\t\t(Opaque FID)\n" "Size\t\t0x%04x\t\t(in bytes)\n" "1st drop reason\t0x%08x\t(details on first drop since last handling)\n" "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n", address, GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE), GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, first_drop_reason, all_drops_reason); /* Clear the doorbell drop details and prepare for next drop */ qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0); /* Mark interrupt as handled (note: even if drop was due to a different * reason than overflow we mark as handled) */ qed_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR, DORQ_REG_INT_STS_DB_DROP | DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR); /* If there are no indications other than drop indications, success */ if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP | DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR | DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0) return 0; } /* Some other indication was present - non recoverable */ DP_INFO(p_hwfn, "DORQ fatal attention\n"); return -EINVAL; } static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) { if (p_hwfn->cdev->recov_in_prog) return 0; p_hwfn->db_recovery_info.dorq_attn = true; qed_dorq_attn_overflow(p_hwfn); return qed_dorq_attn_int_sts(p_hwfn); } static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn) { if (p_hwfn->db_recovery_info.dorq_attn) goto out; /* Call DORQ callback if the attention was missed */ qed_dorq_attn_cb(p_hwfn); out: p_hwfn->db_recovery_info.dorq_attn = false; } /* Instead of major changes to the data-structure, we have a some 'special' * identifiers for sources that changed meaning between adapters. 
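 * On BB adapters, bits flagged ATTENTION_BB_DIFFERENT are translated by
 * qed_int_aeu_translate() to the per-port CNIG entries in aeu_descs_special[]
 * below.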
*/ enum aeu_invert_reg_special_type { AEU_INVERT_REG_SPECIAL_CNIG_0, AEU_INVERT_REG_SPECIAL_CNIG_1, AEU_INVERT_REG_SPECIAL_CNIG_2, AEU_INVERT_REG_SPECIAL_CNIG_3, AEU_INVERT_REG_SPECIAL_MAX, }; static struct aeu_invert_reg_bit aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = { {"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, {"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, {"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, {"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, }; /* Notice aeu_invert_reg must be defined in the same order of bits as HW; */ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { { { /* After Invert 1 */ {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, } }, { { /* After Invert 2 */ {"PGLUE config_space", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, {"PGLUE misc_flr", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, {"PGLUE B RBC", ATTENTION_PAR_INT, qed_pglueb_rbc_attn_cb, BLOCK_PGLUE_B}, {"PGLUE misc_mctp", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, {"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, {"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, {"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID}, {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS}, } }, { { /* After Invert 3 */ {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, } }, { { /* After Invert 4 */ {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, qed_fw_assertion, MAX_BLOCK_ID}, {"General Attention %d", (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID}, {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, qed_general_attention_35, MAX_BLOCK_ID}, {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT | ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0), NULL, BLOCK_NWS}, {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1), NULL, BLOCK_NWS}, {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT | ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2), NULL, BLOCK_NWM}, {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3), NULL, BLOCK_NWM}, {"MCP CPU", ATTENTION_SINGLE, qed_mcp_attn_cb, MAX_BLOCK_ID}, {"MCP Watchdog timer", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, {"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, {"AVS stop status ready", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, {"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID}, {"MSTAT per-path", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID}, {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, {"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG}, {"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB}, {"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB}, {"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB}, {"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS}, } }, { { /* After Invert 5 */ {"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC}, {"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1}, {"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2}, {"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB}, {"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF}, {"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM}, {"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM}, {"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM}, {"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM}, {"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM}, {"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM}, {"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM}, {"PSEM", 
ATTENTION_PAR_INT, NULL, BLOCK_PSEM}, {"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM}, {"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM}, {"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM}, } }, { { /* After Invert 6 */ {"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM}, {"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM}, {"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM}, {"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM}, {"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM}, {"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM}, {"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM}, {"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM}, {"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM}, {"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD}, {"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD}, {"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD}, {"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD}, {"DORQ", ATTENTION_PAR_INT, qed_dorq_attn_cb, BLOCK_DORQ}, {"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG}, {"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC}, } }, { { /* After Invert 7 */ {"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC}, {"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU}, {"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE}, {"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU}, {"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID}, {"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU}, {"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU}, {"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM}, {"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC}, {"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF}, {"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF}, {"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS}, {"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC}, {"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS}, {"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE}, {"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS}, {"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ}, } }, { { /* After Invert 8 */ {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ2}, {"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR}, {"PSWWR (pci_clk)", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR2}, {"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD}, {"PSWRD (pci_clk)", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD2}, {"PSWHST", ATTENTION_PAR_INT, qed_pswhst_attn_cb, BLOCK_PSWHST}, {"PSWHST (pci_clk)", ATTENTION_PAR_INT, NULL, BLOCK_PSWHST2}, {"GRC", ATTENTION_PAR_INT, qed_grc_attn_cb, BLOCK_GRC}, {"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU}, {"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI}, {"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, {"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, {"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, {"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, {"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, {"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, {"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS}, {"PCIE glue/PXP Exp. 
ROM", ATTENTION_SINGLE, NULL, BLOCK_PGLCS}, {"PERST_B assertion", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, {"PERST_B deassertion", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, } }, { { /* After Invert 9 */ {"MCP Latched memory", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, {"MCP Latched scratchpad cache", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, {"MCP Latched ump_tx", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, {"MCP Latched scratchpad", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, } }, }; static struct aeu_invert_reg_bit * qed_int_aeu_translate(struct qed_hwfn *p_hwfn, struct aeu_invert_reg_bit *p_bit) { if (!QED_IS_BB(p_hwfn->cdev)) return p_bit; if (!(p_bit->flags & ATTENTION_BB_DIFFERENT)) return p_bit; return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >> ATTENTION_BB_SHIFT]; } static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn, struct aeu_invert_reg_bit *p_bit) { return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags & ATTENTION_PARITY); } #define ATTN_STATE_BITS (0xfff) #define ATTN_BITS_MASKABLE (0x3ff) struct qed_sb_attn_info { /* Virtual & Physical address of the SB */ struct atten_status_block *sb_attn; dma_addr_t sb_phys; /* Last seen running index */ u16 index; /* A mask of the AEU bits resulting in a parity error */ u32 parity_mask[NUM_ATTN_REGS]; /* A pointer to the attention description structure */ struct aeu_invert_reg *p_aeu_desc; /* Previously asserted attentions, which are still unasserted */ u16 known_attn; /* Cleanup address for the link's general hw attention */ u32 mfw_attn_addr; }; static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn, struct qed_sb_attn_info *p_sb_desc) { u16 rc = 0, index; index = le16_to_cpu(p_sb_desc->sb_attn->sb_index); if (p_sb_desc->index != index) { p_sb_desc->index = index; rc = QED_SB_ATT_IDX; } return rc; } /** * qed_int_assertion() - Handle asserted attention bits. * * @p_hwfn: HW device data. * @asserted_bits: Newly asserted bits. * * Return: Zero value. 
*/ static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits) { struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; u32 igu_mask; /* Mask the source of the attention in the IGU */ igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE); DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE)); igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE); qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask); DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "inner known ATTN state: 0x%04x --> 0x%04x\n", sb_attn_sw->known_attn, sb_attn_sw->known_attn | asserted_bits); sb_attn_sw->known_attn |= asserted_bits; /* Handle MCP events */ if (asserted_bits & 0x100) { qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt); /* Clean the MCP attention */ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, sb_attn_sw->mfw_attn_addr, 0); } DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_IGU_CMD + ((IGU_CMD_ATTN_BIT_SET_UPPER - IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits); DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n", asserted_bits); return 0; } static void qed_int_attn_print(struct qed_hwfn *p_hwfn, enum block_id id, enum dbg_attn_type type, bool b_clear) { struct dbg_attn_block_result attn_results; enum dbg_status status; memset(&attn_results, 0, sizeof(attn_results)); status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type, b_clear, &attn_results); if (status != DBG_STATUS_OK) DP_NOTICE(p_hwfn, "Failed to parse attention information [status: %s]\n", qed_dbg_get_status_str(status)); else qed_dbg_parse_attn(p_hwfn, &attn_results); } /** * qed_int_deassertion_aeu_bit() - Handles the effects of a single * cause of the attention. * * @p_hwfn: HW device data. * @p_aeu: Descriptor of an AEU bit which caused the attention. * @aeu_en_reg: Register offset of the AEU enable reg. which configured * this bit to this group. * @p_bit_name: AEU bit description for logging purposes. * @bitmask: Index of this bit in the aeu_en_reg. * * Return: Zero on success, negative errno otherwise. 
*/ static int qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn, struct aeu_invert_reg_bit *p_aeu, u32 aeu_en_reg, const char *p_bit_name, u32 bitmask) { bool b_fatal = false; int rc = -EINVAL; u32 val; DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n", p_bit_name, bitmask); /* Call callback before clearing the interrupt status */ if (p_aeu->cb) { DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n", p_bit_name); rc = p_aeu->cb(p_hwfn); } if (rc) b_fatal = true; /* Print HW block interrupt registers */ if (p_aeu->block_index != MAX_BLOCK_ID) qed_int_attn_print(p_hwfn, p_aeu->block_index, ATTN_TYPE_INTERRUPT, !b_fatal); /* Reach assertion if attention is fatal */ if (b_fatal) qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_HW_ATTN, "`%s': Fatal attention\n", p_bit_name); else /* If the attention is benign, no need to prevent it */ goto out; /* Prevent this Attention from being asserted in the future */ val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask)); DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n", p_bit_name); /* Re-enable FW assertion (Gen 32) interrupts */ val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0); val |= MISC_REG_AEU_ENABLE4_IGU_OUT_0_GENERAL_ATTN32; qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, val); out: return rc; } /** * qed_int_deassertion_parity() - Handle a single parity AEU source. * * @p_hwfn: HW device data. * @p_aeu: Descriptor of an AEU bit which caused the parity. * @aeu_en_reg: Address of the AEU enable register. * @bit_index: Index (0-31) of an AEU bit. */ static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn, struct aeu_invert_reg_bit *p_aeu, u32 aeu_en_reg, u8 bit_index) { u32 block_id = p_aeu->block_index, mask, val; DP_NOTICE(p_hwfn->cdev, "%s parity attention is set [address 0x%08x, bit %d]\n", p_aeu->bit_name, aeu_en_reg, bit_index); if (block_id != MAX_BLOCK_ID) { qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false); /* In BB, there's a single parity bit for several blocks */ if (block_id == BLOCK_BTB) { qed_int_attn_print(p_hwfn, BLOCK_OPTE, ATTN_TYPE_PARITY, false); qed_int_attn_print(p_hwfn, BLOCK_MCP, ATTN_TYPE_PARITY, false); } } /* Prevent this parity error from being re-asserted */ mask = ~BIT(bit_index); val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask); DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n", p_aeu->bit_name); } /** * qed_int_deassertion() - Handle deassertion of previously asserted * attentions. * * @p_hwfn: HW device data. * @deasserted_bits: newly deasserted bits. * * Return: Zero value. 
*/ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, u16 deasserted_bits) { struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en; u8 i, j, k, bit_idx; int rc = 0; /* Read the attention registers in the AEU */ for (i = 0; i < NUM_ATTN_REGS; i++) { aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MISC_REG_AEU_AFTER_INVERT_1_IGU + i * 0x4); DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]); } /* Find parity attentions first */ for (i = 0; i < NUM_ATTN_REGS; i++) { struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i]; u32 parities; aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32); en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); /* Skip register in which no parity bit is currently set */ parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en; if (!parities) continue; for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) { struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; if (qed_int_is_parity_flag(p_hwfn, p_bit) && !!(parities & BIT(bit_idx))) qed_int_deassertion_parity(p_hwfn, p_bit, aeu_en, bit_idx); bit_idx += ATTENTION_LENGTH(p_bit->flags); } } /* Find non-parity cause for attention and act */ for (k = 0; k < MAX_ATTN_GRPS; k++) { struct aeu_invert_reg_bit *p_aeu; /* Handle only groups whose attention is currently deasserted */ if (!(deasserted_bits & (1 << k))) continue; for (i = 0; i < NUM_ATTN_REGS; i++) { u32 bits; aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32) + k * sizeof(u32) * NUM_ATTN_REGS; en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); bits = aeu_inv_arr[i] & en; /* Skip if no bit from this group is currently set */ if (!bits) continue; /* Find all set bits from current register which belong * to current group, making them responsible for the * previous assertion. */ for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) { long unsigned int bitmask; u8 bit, bit_len; p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j]; p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu); bit = bit_idx; bit_len = ATTENTION_LENGTH(p_aeu->flags); if (qed_int_is_parity_flag(p_hwfn, p_aeu)) { /* Skip Parity */ bit++; bit_len--; } bitmask = bits & (((1 << bit_len) - 1) << bit); bitmask >>= bit; if (bitmask) { u32 flags = p_aeu->flags; char bit_name[30]; u8 num; num = (u8)find_first_bit(&bitmask, bit_len); /* Some bits represent more than a * single interrupt. Correctly print * their name. */ if (ATTENTION_LENGTH(flags) > 2 || ((flags & ATTENTION_PAR_INT) && ATTENTION_LENGTH(flags) > 1)) snprintf(bit_name, 30, p_aeu->bit_name, num); else strscpy(bit_name, p_aeu->bit_name, 30); /* We now need to pass bitmask in its * correct position. 
*/ bitmask <<= bit; /* Handle source of the attention */ qed_int_deassertion_aeu_bit(p_hwfn, p_aeu, aeu_en, bit_name, bitmask); } bit_idx += ATTENTION_LENGTH(p_aeu->flags); } } } /* Handle missed DORQ attention */ qed_dorq_attn_handler(p_hwfn); /* Clear IGU indication for the deasserted bits */ DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_IGU_CMD + ((IGU_CMD_ATTN_BIT_CLR_UPPER - IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits)); /* Unmask deasserted attentions in IGU */ aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE); aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE); qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask); /* Clear deassertion from inner state */ sb_attn_sw->known_attn &= ~deasserted_bits; return rc; } static int qed_int_attentions(struct qed_hwfn *p_hwfn) { struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn; struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn; u32 attn_bits = 0, attn_acks = 0; u16 asserted_bits, deasserted_bits; __le16 index; int rc = 0; /* Read current attention bits/acks - safeguard against attentions * by guaranteeing work on a synchronized timeframe */ do { index = p_sb_attn->sb_index; /* finish reading index before the loop condition */ dma_rmb(); attn_bits = le32_to_cpu(p_sb_attn->atten_bits); attn_acks = le32_to_cpu(p_sb_attn->atten_ack); } while (index != p_sb_attn->sb_index); p_sb_attn->sb_index = index; /* Attention / Deassertion are meaningful (and in correct state) * only when they differ and are consistent with the known state - deassertion * when previous attention & current ack, and assertion when current * attention with no previous attention */ asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) & ~p_sb_attn_sw->known_attn; deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) & p_sb_attn_sw->known_attn; if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) { DP_INFO(p_hwfn, "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n", index, attn_bits, attn_acks, asserted_bits, deasserted_bits, p_sb_attn_sw->known_attn); } else if (asserted_bits == 0x100) { DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "MFW indication via attention\n"); } else { DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "MFW indication [deassertion]\n"); } if (asserted_bits) { rc = qed_int_assertion(p_hwfn, asserted_bits); if (rc) return rc; } if (deasserted_bits) rc = qed_int_deassertion(p_hwfn, deasserted_bits); return rc; } static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn, void __iomem *igu_addr, u32 ack_cons) { u32 igu_ack; igu_ack = ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | (IGU_SEG_ACCESS_ATTN << IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); DIRECT_REG_WR(igu_addr, igu_ack); /* Both segments (interrupts & acks) are written to the same address; * Need to guarantee all commands will be received (in-order) by HW. 
*/ barrier(); } void qed_int_sp_dpc(struct tasklet_struct *t) { struct qed_hwfn *p_hwfn = from_tasklet(p_hwfn, t, sp_dpc); struct qed_pi_info *pi_info = NULL; struct qed_sb_attn_info *sb_attn; struct qed_sb_info *sb_info; int arr_size; u16 rc = 0; if (!p_hwfn->p_sp_sb) { DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n"); return; } sb_info = &p_hwfn->p_sp_sb->sb_info; arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr); if (!sb_info) { DP_ERR(p_hwfn->cdev, "Status block is NULL - cannot ack interrupts\n"); return; } if (!p_hwfn->p_sb_attn) { DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn"); return; } sb_attn = p_hwfn->p_sb_attn; DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n", p_hwfn, p_hwfn->my_id); /* Disable ack for def status block. Required both for msix + * inta in non-mask mode, in inta does no harm. */ qed_sb_ack(sb_info, IGU_INT_DISABLE, 0); /* Gather Interrupts/Attentions information */ if (!sb_info->sb_virt) { DP_ERR(p_hwfn->cdev, "Interrupt Status block is NULL - cannot check for new interrupts!\n"); } else { u32 tmp_index = sb_info->sb_ack; rc = qed_sb_update_sb_idx(sb_info); DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR, "Interrupt indices: 0x%08x --> 0x%08x\n", tmp_index, sb_info->sb_ack); } if (!sb_attn || !sb_attn->sb_attn) { DP_ERR(p_hwfn->cdev, "Attentions Status block is NULL - cannot check for new attentions!\n"); } else { u16 tmp_index = sb_attn->index; rc |= qed_attn_update_idx(p_hwfn, sb_attn); DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR, "Attention indices: 0x%08x --> 0x%08x\n", tmp_index, sb_attn->index); } /* Check if we expect interrupts at this time. if not just ack them */ if (!(rc & QED_SB_EVENT_MASK)) { qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); return; } /* Check the validity of the DPC ptt. If not ack interrupts and fail */ if (!p_hwfn->p_dpc_ptt) { DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n"); qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); return; } if (rc & QED_SB_ATT_IDX) qed_int_attentions(p_hwfn); if (rc & QED_SB_IDX) { int pi; /* Look for a free index */ for (pi = 0; pi < arr_size; pi++) { pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi]; if (pi_info->comp_cb) pi_info->comp_cb(p_hwfn, pi_info->cookie); } } if (sb_attn && (rc & QED_SB_ATT_IDX)) /* This should be done before the interrupts are enabled, * since otherwise a new attention will be generated. 
*/ qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index); qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); } static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn) { struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn; if (!p_sb) return; if (p_sb->sb_attn) dma_free_coherent(&p_hwfn->cdev->pdev->dev, SB_ATTN_ALIGNED_SIZE(p_hwfn), p_sb->sb_attn, p_sb->sb_phys); kfree(p_sb); p_hwfn->p_sb_attn = NULL; } static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn)); sb_info->index = 0; sb_info->known_attn = 0; /* Configure Attention Status Block in IGU */ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L, lower_32_bits(p_hwfn->p_sb_attn->sb_phys)); qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H, upper_32_bits(p_hwfn->p_sb_attn->sb_phys)); } static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, void *sb_virt_addr, dma_addr_t sb_phy_addr) { struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; int i, j, k; sb_info->sb_attn = sb_virt_addr; sb_info->sb_phys = sb_phy_addr; /* Set the pointer to the AEU descriptors */ sb_info->p_aeu_desc = aeu_descs; /* Calculate Parity Masks */ memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS); for (i = 0; i < NUM_ATTN_REGS; i++) { /* j is array index, k is bit index */ for (j = 0, k = 0; k < 32 && j < 32; j++) { struct aeu_invert_reg_bit *p_aeu; p_aeu = &aeu_descs[i].bits[j]; if (qed_int_is_parity_flag(p_hwfn, p_aeu)) sb_info->parity_mask[i] |= 1 << k; k += ATTENTION_LENGTH(p_aeu->flags); } DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "Attn Mask [Reg %d]: 0x%08x\n", i, sb_info->parity_mask[i]); } /* Set the address of cleanup for the mcp attention */ sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) + MISC_REG_AEU_GENERAL_ATTN_0; qed_int_sb_attn_setup(p_hwfn, p_ptt); } static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_dev *cdev = p_hwfn->cdev; struct qed_sb_attn_info *p_sb; dma_addr_t p_phys = 0; void *p_virt; /* SB struct */ p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL); if (!p_sb) return -ENOMEM; /* SB ring */ p_virt = dma_alloc_coherent(&cdev->pdev->dev, SB_ATTN_ALIGNED_SIZE(p_hwfn), &p_phys, GFP_KERNEL); if (!p_virt) { kfree(p_sb); return -ENOMEM; } /* Attention setup */ p_hwfn->p_sb_attn = p_sb; qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys); return 0; } /* coalescing timeout = timeset << (timer_res + 1) */ #define QED_CAU_DEF_RX_USECS 24 #define QED_CAU_DEF_TX_USECS 48 void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, struct cau_sb_entry *p_sb_entry, u8 pf_id, u16 vf_number, u8 vf_valid) { struct qed_dev *cdev = p_hwfn->cdev; u32 cau_state, params = 0, data = 0; u8 timer_res; memset(p_sb_entry, 0, sizeof(*p_sb_entry)); SET_FIELD(params, CAU_SB_ENTRY_PF_NUMBER, pf_id); SET_FIELD(params, CAU_SB_ENTRY_VF_NUMBER, vf_number); SET_FIELD(params, CAU_SB_ENTRY_VF_VALID, vf_valid); SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F); cau_state = CAU_HC_DISABLE_STATE; if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { cau_state = CAU_HC_ENABLE_STATE; if (!cdev->rx_coalesce_usecs) cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS; if (!cdev->tx_coalesce_usecs) cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS; } /* Coalesce = (timeset << timer-res), timeset is 7bit wide */ if (cdev->rx_coalesce_usecs <= 0x7F) timer_res = 0; else if (cdev->rx_coalesce_usecs <= 0xFF) timer_res = 1; else timer_res = 2; SET_FIELD(params, 
CAU_SB_ENTRY_TIMER_RES0, timer_res); if (cdev->tx_coalesce_usecs <= 0x7F) timer_res = 0; else if (cdev->tx_coalesce_usecs <= 0xFF) timer_res = 1; else timer_res = 2; SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res); p_sb_entry->params = cpu_to_le32(params); SET_FIELD(data, CAU_SB_ENTRY_STATE0, cau_state); SET_FIELD(data, CAU_SB_ENTRY_STATE1, cau_state); p_sb_entry->data = cpu_to_le32(data); } static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 igu_sb_id, u32 pi_index, enum qed_coalescing_fsm coalescing_fsm, u8 timeset) { u32 sb_offset, pi_offset; u32 prod = 0; if (IS_VF(p_hwfn->cdev)) return; SET_FIELD(prod, CAU_PI_ENTRY_PI_TIMESET, timeset); if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE) SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 0); else SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 1); sb_offset = igu_sb_id * PIS_PER_SB; pi_offset = sb_offset + pi_index; if (p_hwfn->hw_init_done) qed_wr(p_hwfn, p_ptt, CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), prod); else STORE_RT_REG(p_hwfn, CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, prod); } void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, dma_addr_t sb_phys, u16 igu_sb_id, u16 vf_number, u8 vf_valid) { struct cau_sb_entry sb_entry; qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id, vf_number, vf_valid); if (p_hwfn->hw_init_done) { /* Wide-bus, initialize via DMAE */ u64 phys_addr = (u64)sb_phys; qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr, CAU_REG_SB_ADDR_MEMORY + igu_sb_id * sizeof(u64), 2, NULL); qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry, CAU_REG_SB_VAR_MEMORY + igu_sb_id * sizeof(u64), 2, NULL); } else { /* Initialize Status Block Address */ STORE_RT_REG_AGG(p_hwfn, CAU_REG_SB_ADDR_MEMORY_RT_OFFSET + igu_sb_id * 2, sb_phys); STORE_RT_REG_AGG(p_hwfn, CAU_REG_SB_VAR_MEMORY_RT_OFFSET + igu_sb_id * 2, sb_entry); } /* Configure pi coalescing if set */ if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { u8 num_tc = p_hwfn->hw_info.num_hw_tc; u8 timeset, timer_res; u8 i; /* timeset = (coalesce >> timer-res), timeset is 7bit wide */ if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F) timer_res = 0; else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF) timer_res = 1; else timer_res = 2; timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res); qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, QED_COAL_RX_STATE_MACHINE, timeset); if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F) timer_res = 0; else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF) timer_res = 1; else timer_res = 2; timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res); for (i = 0; i < num_tc; i++) { qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, TX_PI(i), QED_COAL_TX_STATE_MACHINE, timeset); } } } void qed_int_sb_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_sb_info *sb_info) { /* zero status block and ack counter */ sb_info->sb_ack = 0; memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); if (IS_PF(p_hwfn->cdev)) qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, sb_info->igu_sb_id, 0, 0); } struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf) { struct qed_igu_block *p_block; u16 igu_id; for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_id++) { p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; if (!(p_block->status & QED_IGU_STATUS_VALID) || !(p_block->status & QED_IGU_STATUS_FREE)) continue; if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf) return p_block; } return NULL; } static u16 qed_get_pf_igu_sb_id(struct 
qed_hwfn *p_hwfn, u16 vector_id) { struct qed_igu_block *p_block; u16 igu_id; for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_id++) { p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; if (!(p_block->status & QED_IGU_STATUS_VALID) || !p_block->is_pf || p_block->vector_number != vector_id) continue; return igu_id; } return QED_SB_INVALID_IDX; } u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) { u16 igu_sb_id; /* Assuming continuous set of IGU SBs dedicated for given PF */ if (sb_id == QED_SP_SB_ID) igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; else if (IS_PF(p_hwfn->cdev)) igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1); else igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id); if (sb_id == QED_SP_SB_ID) DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id); else DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id); return igu_sb_id; } int qed_int_sb_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_sb_info *sb_info, void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id) { sb_info->sb_virt = sb_virt_addr; sb_info->sb_phys = sb_phy_addr; sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id); if (sb_id != QED_SP_SB_ID) { if (IS_PF(p_hwfn->cdev)) { struct qed_igu_info *p_info; struct qed_igu_block *p_block; p_info = p_hwfn->hw_info.p_igu_info; p_block = &p_info->entry[sb_info->igu_sb_id]; p_block->sb_info = sb_info; p_block->status &= ~QED_IGU_STATUS_FREE; p_info->usage.free_cnt--; } else { qed_vf_set_sb_info(p_hwfn, sb_id, sb_info); } } sb_info->cdev = p_hwfn->cdev; /* The igu address will hold the absolute address that needs to be * written to for a specific status block */ if (IS_PF(p_hwfn->cdev)) { sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3); } else { sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + PXP_VF_BAR0_START_IGU + ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3); } sb_info->flags |= QED_SB_INFO_INIT; qed_int_sb_setup(p_hwfn, p_ptt, sb_info); return 0; } int qed_int_sb_release(struct qed_hwfn *p_hwfn, struct qed_sb_info *sb_info, u16 sb_id) { struct qed_igu_block *p_block; struct qed_igu_info *p_info; if (!sb_info) return 0; /* zero status block and ack counter */ sb_info->sb_ack = 0; memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); if (IS_VF(p_hwfn->cdev)) { qed_vf_set_sb_info(p_hwfn, sb_id, NULL); return 0; } p_info = p_hwfn->hw_info.p_igu_info; p_block = &p_info->entry[sb_info->igu_sb_id]; /* Vector 0 is reserved to Default SB */ if (!p_block->vector_number) { DP_ERR(p_hwfn, "Do Not free sp sb using this function"); return -EINVAL; } /* Lose reference to client's SB info, and fix counters */ p_block->sb_info = NULL; p_block->status |= QED_IGU_STATUS_FREE; p_info->usage.free_cnt++; return 0; } static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn) { struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb; if (!p_sb) return; if (p_sb->sb_info.sb_virt) dma_free_coherent(&p_hwfn->cdev->pdev->dev, SB_ALIGNED_SIZE(p_hwfn), p_sb->sb_info.sb_virt, p_sb->sb_info.sb_phys); kfree(p_sb); p_hwfn->p_sp_sb = NULL; } static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_sb_sp_info *p_sb; dma_addr_t p_phys = 0; void *p_virt; /* SB struct */ p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL); if (!p_sb) return -ENOMEM; /* SB ring */ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, SB_ALIGNED_SIZE(p_hwfn), &p_phys, GFP_KERNEL); if (!p_virt) { kfree(p_sb); return -ENOMEM; } /* Status 
Block setup */ p_hwfn->p_sp_sb = p_sb; qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt, p_phys, QED_SP_SB_ID); memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr)); return 0; } int qed_int_register_cb(struct qed_hwfn *p_hwfn, qed_int_comp_cb_t comp_cb, void *cookie, u8 *sb_idx, __le16 **p_fw_cons) { struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; int rc = -ENOMEM; u8 pi; /* Look for a free index */ for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) { if (p_sp_sb->pi_info_arr[pi].comp_cb) continue; p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; p_sp_sb->pi_info_arr[pi].cookie = cookie; *sb_idx = pi; *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi]; rc = 0; break; } return rc; } int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi) { struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL) return -ENOMEM; p_sp_sb->pi_info_arr[pi].comp_cb = NULL; p_sp_sb->pi_info_arr[pi].cookie = NULL; return 0; } u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn) { return p_hwfn->p_sp_sb->sb_info.igu_sb_id; } void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_int_mode int_mode) { u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN; p_hwfn->cdev->int_mode = int_mode; switch (p_hwfn->cdev->int_mode) { case QED_INT_MODE_INTA: igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN; igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; break; case QED_INT_MODE_MSI: igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; break; case QED_INT_MODE_MSIX: igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; break; case QED_INT_MODE_POLL: break; } qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); } static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { /* Configure AEU signal change to produce attentions */ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff); /* Unmask AEU signals toward IGU */ qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); } int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_int_mode int_mode) { int rc = 0; qed_int_igu_enable_attn(p_hwfn, p_ptt); if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { rc = qed_slowpath_irq_req(p_hwfn); if (rc) { DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n"); return -EINVAL; } p_hwfn->b_int_requested = true; } /* Enable interrupt Generation */ qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode); p_hwfn->b_int_enabled = 1; return rc; } void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { p_hwfn->b_int_enabled = 0; if (IS_VF(p_hwfn->cdev)) return; qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); } #define IGU_CLEANUP_SLEEP_LENGTH (1000) static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 igu_sb_id, bool cleanup_set, u16 opaque_fid) { u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id; u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; /* Set the data field */ SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 
1 : 0); SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0); SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET); /* Set the control register */ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr); SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid); SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR); qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data); barrier(); qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl); /* calculate where to read the status bit from */ sb_bit = 1 << (igu_sb_id % 32); sb_bit_addr = igu_sb_id / 32 * sizeof(u32); sb_bit_addr += IGU_REG_CLEANUP_STATUS_0; /* Now wait for the command to complete */ do { val = qed_rd(p_hwfn, p_ptt, sb_bit_addr); if ((val & sb_bit) == (cleanup_set ? sb_bit : 0)) break; usleep_range(5000, 10000); } while (--sleep_cnt); if (!sleep_cnt) DP_NOTICE(p_hwfn, "Timeout waiting for clear status 0x%08x [for sb %d]\n", val, igu_sb_id); } void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 igu_sb_id, u16 opaque, bool b_set) { struct qed_igu_block *p_block; int pi, i; p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n", igu_sb_id, p_block->function_id, p_block->is_pf, p_block->vector_number); /* Set */ if (b_set) qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque); /* Clear */ qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque); /* Wait for the IGU SB to cleanup */ for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { u32 val; val = qed_rd(p_hwfn, p_ptt, IGU_REG_WRITE_DONE_PENDING + ((igu_sb_id / 32) * 4)); if (val & BIT((igu_sb_id % 32))) usleep_range(10, 20); else break; } if (i == IGU_CLEANUP_SLEEP_LENGTH) DP_NOTICE(p_hwfn, "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", igu_sb_id); /* Clear the CAU for the SB */ for (pi = 0; pi < 12; pi++) qed_wr(p_hwfn, p_ptt, CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0); } void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_set, bool b_slowpath) { struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; struct qed_igu_block *p_block; u16 igu_sb_id = 0; u32 val = 0; val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN; qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val); for (igu_sb_id = 0; igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { p_block = &p_info->entry[igu_sb_id]; if (!(p_block->status & QED_IGU_STATUS_VALID) || !p_block->is_pf || (p_block->status & QED_IGU_STATUS_DSB)) continue; qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id, p_hwfn->hw_info.opaque_fid, b_set); } if (b_slowpath) qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, p_info->igu_dsb_id, p_hwfn->hw_info.opaque_fid, b_set); } int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; struct qed_igu_block *p_block; int pf_sbs, vf_sbs; u16 igu_sb_id; u32 val, rval; if (!RESC_NUM(p_hwfn, QED_SB)) { p_info->b_allow_pf_vf_change = false; } else { /* Use the numbers the MFW have provided - * don't forget MFW accounts for the default SB as well. 
*/ p_info->b_allow_pf_vf_change = true; if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) { DP_INFO(p_hwfn, "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n", RESC_NUM(p_hwfn, QED_SB) - 1, p_info->usage.cnt); p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1; } if (IS_PF_SRIOV(p_hwfn)) { u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs; if (vfs != p_info->usage.iov_cnt) DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n", p_info->usage.iov_cnt, vfs); /* At this point we know how many SBs we have totally * in IGU + number of PF SBs. So we can validate that * we'd have sufficient for VF. */ if (vfs > p_info->usage.free_cnt + p_info->usage.free_cnt_iov - p_info->usage.cnt) { DP_NOTICE(p_hwfn, "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n", p_info->usage.free_cnt + p_info->usage.free_cnt_iov, p_info->usage.cnt, vfs); return -EINVAL; } /* Currently cap the number of VF SBs by the * number of VFs. */ p_info->usage.iov_cnt = vfs; } } /* Mark all SBs as free, now in the right PF/VFs division */ p_info->usage.free_cnt = p_info->usage.cnt; p_info->usage.free_cnt_iov = p_info->usage.iov_cnt; p_info->usage.orig = p_info->usage.cnt; p_info->usage.iov_orig = p_info->usage.iov_cnt; /* We now proceed to re-configure the IGU cam to reflect the initial * configuration. We can start with the Default SB. */ pf_sbs = p_info->usage.cnt; vf_sbs = p_info->usage.iov_cnt; for (igu_sb_id = p_info->igu_dsb_id; igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { p_block = &p_info->entry[igu_sb_id]; val = 0; if (!(p_block->status & QED_IGU_STATUS_VALID)) continue; if (p_block->status & QED_IGU_STATUS_DSB) { p_block->function_id = p_hwfn->rel_pf_id; p_block->is_pf = 1; p_block->vector_number = 0; p_block->status = QED_IGU_STATUS_VALID | QED_IGU_STATUS_PF | QED_IGU_STATUS_DSB; } else if (pf_sbs) { pf_sbs--; p_block->function_id = p_hwfn->rel_pf_id; p_block->is_pf = 1; p_block->vector_number = p_info->usage.cnt - pf_sbs; p_block->status = QED_IGU_STATUS_VALID | QED_IGU_STATUS_PF | QED_IGU_STATUS_FREE; } else if (vf_sbs) { p_block->function_id = p_hwfn->cdev->p_iov_info->first_vf_in_pf + p_info->usage.iov_cnt - vf_sbs; p_block->is_pf = 0; p_block->vector_number = 0; p_block->status = QED_IGU_STATUS_VALID | QED_IGU_STATUS_FREE; vf_sbs--; } else { p_block->function_id = 0; p_block->is_pf = 0; p_block->vector_number = 0; } SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, p_block->function_id); SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, p_block->vector_number); /* VF entries would be enabled when VF is initialized */ SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); rval = qed_rd(p_hwfn, p_ptt, IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); if (rval != val) { qed_wr(p_hwfn, p_ptt, IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id, val); DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n", igu_sb_id, p_block->function_id, p_block->is_pf, p_block->vector_number, rval, val); } } return 0; } static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 igu_sb_id) { u32 val = qed_rd(p_hwfn, p_ptt, IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); struct qed_igu_block *p_block; p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; /* Fill the block information */ p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER); p_block->is_pf = 
GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER); p_block->igu_sb_id = igu_sb_id; } int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_igu_info *p_igu_info; struct qed_igu_block *p_block; u32 min_vf = 0, max_vf = 0; u16 igu_sb_id; p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL); if (!p_hwfn->hw_info.p_igu_info) return -ENOMEM; p_igu_info = p_hwfn->hw_info.p_igu_info; /* Distinguish between existent and non-existent default SB */ p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX; /* Find the range of VF ids whose SB belong to this PF */ if (p_hwfn->cdev->p_iov_info) { struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; min_vf = p_iov->first_vf_in_pf; max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs; } for (igu_sb_id = 0; igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { /* Read current entry; Notice it might not belong to this PF */ qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id); p_block = &p_igu_info->entry[igu_sb_id]; if ((p_block->is_pf) && (p_block->function_id == p_hwfn->rel_pf_id)) { p_block->status = QED_IGU_STATUS_PF | QED_IGU_STATUS_VALID | QED_IGU_STATUS_FREE; if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) p_igu_info->usage.cnt++; } else if (!(p_block->is_pf) && (p_block->function_id >= min_vf) && (p_block->function_id < max_vf)) { /* Available for VFs of this PF */ p_block->status = QED_IGU_STATUS_VALID | QED_IGU_STATUS_FREE; if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) p_igu_info->usage.iov_cnt++; } /* Mark the First entry belonging to the PF or its VFs * as the default SB [we'll reset IGU prior to first usage]. */ if ((p_block->status & QED_IGU_STATUS_VALID) && (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) { p_igu_info->igu_dsb_id = igu_sb_id; p_block->status |= QED_IGU_STATUS_DSB; } /* limit number of prints by having each PF print only its * entries with the exception of PF0 which would print * everything. */ if ((p_block->status & QED_IGU_STATUS_VALID) || (p_hwfn->abs_pf_id == 0)) { DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", igu_sb_id, p_block->function_id, p_block->is_pf, p_block->vector_number); } } if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) { DP_NOTICE(p_hwfn, "IGU CAM returned invalid values igu_dsb_id=0x%x\n", p_igu_info->igu_dsb_id); return -EINVAL; } /* All non default SB are considered free at this point */ p_igu_info->usage.free_cnt = p_igu_info->usage.cnt; p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt; DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n", p_igu_info->igu_dsb_id, p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt); return 0; } /** * qed_int_igu_init_rt() - Initialize IGU runtime registers. * * @p_hwfn: HW device data. 
*/ void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn) { u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN; STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); } u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn) { u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - IGU_CMD_INT_ACK_BASE; u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - IGU_CMD_INT_ACK_BASE; u32 intr_status_hi = 0, intr_status_lo = 0; u64 intr_status = 0; intr_status_lo = REG_RD(p_hwfn, GTT_BAR0_MAP_REG_IGU_CMD + lsb_igu_cmd_addr * 8); intr_status_hi = REG_RD(p_hwfn, GTT_BAR0_MAP_REG_IGU_CMD + msb_igu_cmd_addr * 8); intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo; return intr_status; } static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn) { tasklet_setup(&p_hwfn->sp_dpc, qed_int_sp_dpc); p_hwfn->b_sp_dpc_enabled = true; } int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { int rc = 0; rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt); if (rc) return rc; rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt); return rc; } void qed_int_free(struct qed_hwfn *p_hwfn) { qed_int_sp_sb_free(p_hwfn); qed_int_sb_attn_free(p_hwfn); } void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); qed_int_sb_attn_setup(p_hwfn, p_ptt); qed_int_sp_dpc_setup(p_hwfn); } void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, struct qed_sb_cnt_info *p_sb_cnt_info) { struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info; if (!info || !p_sb_cnt_info) return; memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info)); } void qed_int_disable_post_isr_release(struct qed_dev *cdev) { int i; for_each_hwfn(cdev, i) cdev->hwfns[i].b_int_requested = false; } void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable) { cdev->attn_clr_en = clr_enable; } int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 timer_res, u16 sb_id, bool tx) { struct cau_sb_entry sb_entry; u32 params; int rc; if (!p_hwfn->hw_init_done) { DP_ERR(p_hwfn, "hardware not initialized yet\n"); return -EINVAL; } rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + sb_id * sizeof(u64), (u64)(uintptr_t)&sb_entry, 2, NULL); if (rc) { DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); return rc; } params = le32_to_cpu(sb_entry.params); if (tx) SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res); else SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES0, timer_res); sb_entry.params = cpu_to_le32(params); rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry, CAU_REG_SB_VAR_MEMORY + sb_id * sizeof(u64), 2, NULL); if (rc) { DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc); return rc; } return rc; } int qed_int_get_sb_dbg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_sb_info *p_sb, struct qed_sb_info_dbg *p_info) { u16 sbid = p_sb->igu_sb_id; u32 i; if (IS_VF(p_hwfn->cdev)) return -EINVAL; if (sbid >= NUM_OF_SBS(p_hwfn->cdev)) return -EINVAL; p_info->igu_prod = qed_rd(p_hwfn, p_ptt, IGU_REG_PRODUCER_MEMORY + sbid * 4); p_info->igu_cons = qed_rd(p_hwfn, p_ptt, IGU_REG_CONSUMER_MEM + sbid * 4); for (i = 0; i < PIS_PER_SB; i++) p_info->pi[i] = (u16)qed_rd(p_hwfn, p_ptt, CAU_REG_PI_MEMORY + sbid * 4 * PIS_PER_SB + i * 4); return 0; }
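/*
 * A minimal illustrative sketch, not taken from the driver code above: it only
 * demonstrates the interrupt-coalescing encoding used by
 * qed_init_cau_sb_entry() and qed_int_cau_conf_sb(). A timer resolution is
 * chosen so the requested microsecond value fits the 7-bit timeset field
 * (timeset = usecs >> timer_res), and the hardware then scales timeset back up
 * by the timer resolution ("coalescing timeout = timeset << (timer_res + 1)"
 * per the driver comment). The helper name example_cau_timer_res and the
 * sample values are hypothetical and exist only for this example.
 */
#include <stdio.h>

static unsigned int example_cau_timer_res(unsigned int usecs)
{
	/* Same thresholds as the driver: <= 0x7F -> 0, <= 0xFF -> 1, else 2 */
	if (usecs <= 0x7F)
		return 0;
	else if (usecs <= 0xFF)
		return 1;
	else
		return 2;
}

int main(void)
{
	/* 24us/48us mirror QED_CAU_DEF_RX_USECS and QED_CAU_DEF_TX_USECS */
	unsigned int samples[] = { 24, 48, 200, 500 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned int usecs = samples[i];
		unsigned int timer_res = example_cau_timer_res(usecs);
		unsigned int timeset = usecs >> timer_res;

		printf("usecs=%u -> timer_res=%u timeset=0x%02x\n",
		       usecs, timer_res, timeset);
	}

	return 0;
}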
linux-master
drivers/net/ethernet/qlogic/qed/qed_int.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/spinlock.h> #include <linux/tcp.h> #include "qed_cxt.h" #include "qed_hw.h" #include "qed_ll2.h" #include "qed_rdma.h" #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_ooo.h" #define QED_IWARP_ORD_DEFAULT 32 #define QED_IWARP_IRD_DEFAULT 32 #define QED_IWARP_MAX_FW_MSS 4120 #define QED_EP_SIG 0xecabcdef struct mpa_v2_hdr { __be16 ird; __be16 ord; }; #define MPA_V2_PEER2PEER_MODEL 0x8000 #define MPA_V2_SEND_RTR 0x4000 /* on ird */ #define MPA_V2_READ_RTR 0x4000 /* on ord */ #define MPA_V2_WRITE_RTR 0x8000 #define MPA_V2_IRD_ORD_MASK 0x3FFF #define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED) #define QED_IWARP_INVALID_TCP_CID 0xffffffff #define QED_IWARP_RCV_WND_SIZE_DEF_BB_2P (200 * 1024) #define QED_IWARP_RCV_WND_SIZE_DEF_BB_4P (100 * 1024) #define QED_IWARP_RCV_WND_SIZE_DEF_AH_2P (150 * 1024) #define QED_IWARP_RCV_WND_SIZE_DEF_AH_4P (90 * 1024) #define QED_IWARP_RCV_WND_SIZE_MIN (0xffff) #define TIMESTAMP_HEADER_SIZE (12) #define QED_IWARP_MAX_FIN_RT_DEFAULT (2) #define QED_IWARP_TS_EN BIT(0) #define QED_IWARP_DA_EN BIT(1) #define QED_IWARP_PARAM_CRC_NEEDED (1) #define QED_IWARP_PARAM_P2P (1) #define QED_IWARP_DEF_MAX_RT_TIME (0) #define QED_IWARP_DEF_CWND_FACTOR (4) #define QED_IWARP_DEF_KA_MAX_PROBE_CNT (5) #define QED_IWARP_DEF_KA_TIMEOUT (1200000) /* 20 min */ #define QED_IWARP_DEF_KA_INTERVAL (1000) /* 1 sec */ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, __le16 echo, union event_ring_data *data, u8 fw_return_code); /* Override devinfo with iWARP specific values */ void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn) { struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE; dev->max_qp = min_t(u32, IWARP_MAX_QPS, p_hwfn->p_rdma_info->num_qps) - QED_IWARP_PREALLOC_CNT; dev->max_cq = dev->max_qp; dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT; dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT; } void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP; qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1); p_hwfn->b_rdma_enabled_in_prs = true; } /* We have two cid maps, one for tcp which should be used only from passive * syn processing and replacing a pre-allocated ep in the list. The second * for active tcp and for QPs. 
*/ static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid) { cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto); spin_lock_bh(&p_hwfn->p_rdma_info->lock); if (cid < QED_IWARP_PREALLOC_CNT) qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid); else qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid); spin_unlock_bh(&p_hwfn->p_rdma_info->lock); } void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn, struct iwarp_init_func_ramrod_data *p_ramrod) { p_ramrod->iwarp.ll2_ooo_q_index = RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) + p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle; p_ramrod->tcp.tx_sws_timer = cpu_to_le16(QED_TX_SWS_TIMER_DFLT); p_ramrod->tcp.two_msl_timer = cpu_to_le32(QED_TWO_MSL_TIMER_DFLT); p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT; return; } static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid) { int rc; spin_lock_bh(&p_hwfn->p_rdma_info->lock); rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid); spin_unlock_bh(&p_hwfn->p_rdma_info->lock); if (rc) { DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n"); return rc; } *cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto); rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid); if (rc) qed_iwarp_cid_cleaned(p_hwfn, *cid); return rc; } static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid) { cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto); spin_lock_bh(&p_hwfn->p_rdma_info->lock); qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid); spin_unlock_bh(&p_hwfn->p_rdma_info->lock); } /* This function allocates a cid for passive tcp (called from syn receive) * the reason it's separate from the regular cid allocation is because it * is assured that these cids already have ilt allocated. 
They are preallocated * to ensure that we won't need to allocate memory during syn processing */ static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid) { int rc; spin_lock_bh(&p_hwfn->p_rdma_info->lock); rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid); spin_unlock_bh(&p_hwfn->p_rdma_info->lock); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "can't allocate iwarp tcp cid max-count=%d\n", p_hwfn->p_rdma_info->tcp_cid_map.max_count); *cid = QED_IWARP_INVALID_TCP_CID; return rc; } *cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto); return 0; } int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp, struct qed_rdma_create_qp_out_params *out_params) { struct iwarp_create_qp_ramrod_data *p_ramrod; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; u16 physical_queue; u32 cid; int rc; qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, IWARP_SHARED_QUEUE_PAGE_SIZE, &qp->shared_queue_phys_addr, GFP_KERNEL); if (!qp->shared_queue) return -ENOMEM; out_params->sq_pbl_virt = (u8 *)qp->shared_queue + IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET; out_params->sq_pbl_phys = qp->shared_queue_phys_addr + IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET; out_params->rq_pbl_virt = (u8 *)qp->shared_queue + IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET; out_params->rq_pbl_phys = qp->shared_queue_phys_addr + IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET; rc = qed_iwarp_alloc_cid(p_hwfn, &cid); if (rc) goto err1; qp->icid = (u16)cid; memset(&init_data, 0, sizeof(init_data)); init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.cid = qp->icid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, IWARP_RAMROD_CMD_ID_CREATE_QP, PROTOCOLID_IWARP, &init_data); if (rc) goto err2; p_ramrod = &p_ent->ramrod.iwarp_create_qp; SET_FIELD(p_ramrod->flags, IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN, qp->fmr_and_reserved_lkey); SET_FIELD(p_ramrod->flags, IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all); SET_FIELD(p_ramrod->flags, IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN, qp->incoming_rdma_read_en); SET_FIELD(p_ramrod->flags, IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN, qp->incoming_rdma_write_en); SET_FIELD(p_ramrod->flags, IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN, qp->incoming_atomic_en); SET_FIELD(p_ramrod->flags, IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq); p_ramrod->pd = cpu_to_le16(qp->pd); p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages); p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages); p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id); p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid); p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi; p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo; p_ramrod->cq_cid_for_sq = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id); p_ramrod->cq_cid_for_rq = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id); p_ramrod->dpi = cpu_to_le16(qp->dpi); physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); p_ramrod->physical_q0 = cpu_to_le16(physical_queue); physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK); p_ramrod->physical_q1 = cpu_to_le16(physical_queue); rc = qed_spq_post(p_hwfn, p_ent, NULL); if (rc) goto err2; return rc; err2: qed_iwarp_cid_cleaned(p_hwfn, cid); err1: dma_free_coherent(&p_hwfn->cdev->pdev->dev, IWARP_SHARED_QUEUE_PAGE_SIZE, qp->shared_queue, qp->shared_queue_phys_addr); return rc; } static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { struct 
iwarp_modify_qp_ramrod_data *p_ramrod; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; u16 flags, trans_to_state; int rc; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qp->icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, IWARP_RAMROD_CMD_ID_MODIFY_QP, p_hwfn->p_rdma_info->proto, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.iwarp_modify_qp; flags = le16_to_cpu(p_ramrod->flags); SET_FIELD(flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, 0x1); p_ramrod->flags = cpu_to_le16(flags); if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING) trans_to_state = IWARP_MODIFY_QP_STATE_CLOSING; else trans_to_state = IWARP_MODIFY_QP_STATE_ERROR; p_ramrod->transition_to_state = cpu_to_le16(trans_to_state); rc = qed_spq_post(p_hwfn, p_ent, NULL); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x)rc=%d\n", qp->icid, rc); return rc; } enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state) { switch (state) { case QED_ROCE_QP_STATE_RESET: case QED_ROCE_QP_STATE_INIT: case QED_ROCE_QP_STATE_RTR: return QED_IWARP_QP_STATE_IDLE; case QED_ROCE_QP_STATE_RTS: return QED_IWARP_QP_STATE_RTS; case QED_ROCE_QP_STATE_SQD: return QED_IWARP_QP_STATE_CLOSING; case QED_ROCE_QP_STATE_ERR: return QED_IWARP_QP_STATE_ERROR; case QED_ROCE_QP_STATE_SQE: return QED_IWARP_QP_STATE_TERMINATE; default: return QED_IWARP_QP_STATE_ERROR; } } static enum qed_roce_qp_state qed_iwarp2roce_state(enum qed_iwarp_qp_state state) { switch (state) { case QED_IWARP_QP_STATE_IDLE: return QED_ROCE_QP_STATE_INIT; case QED_IWARP_QP_STATE_RTS: return QED_ROCE_QP_STATE_RTS; case QED_IWARP_QP_STATE_TERMINATE: return QED_ROCE_QP_STATE_SQE; case QED_IWARP_QP_STATE_CLOSING: return QED_ROCE_QP_STATE_SQD; case QED_IWARP_QP_STATE_ERROR: return QED_ROCE_QP_STATE_ERR; default: return QED_ROCE_QP_STATE_ERR; } } static const char * const iwarp_state_names[] = { "IDLE", "RTS", "TERMINATE", "CLOSING", "ERROR", }; int qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp, enum qed_iwarp_qp_state new_state, bool internal) { enum qed_iwarp_qp_state prev_iw_state; bool modify_fw = false; int rc = 0; /* modify QP can be called from upper-layer or as a result of async * RST/FIN... therefore need to protect */ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock); prev_iw_state = qp->iwarp_state; if (prev_iw_state == new_state) { spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock); return 0; } switch (prev_iw_state) { case QED_IWARP_QP_STATE_IDLE: switch (new_state) { case QED_IWARP_QP_STATE_RTS: qp->iwarp_state = QED_IWARP_QP_STATE_RTS; break; case QED_IWARP_QP_STATE_ERROR: qp->iwarp_state = QED_IWARP_QP_STATE_ERROR; if (!internal) modify_fw = true; break; default: break; } break; case QED_IWARP_QP_STATE_RTS: switch (new_state) { case QED_IWARP_QP_STATE_CLOSING: if (!internal) modify_fw = true; qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING; break; case QED_IWARP_QP_STATE_ERROR: if (!internal) modify_fw = true; qp->iwarp_state = QED_IWARP_QP_STATE_ERROR; break; default: break; } break; case QED_IWARP_QP_STATE_ERROR: switch (new_state) { case QED_IWARP_QP_STATE_IDLE: qp->iwarp_state = new_state; break; case QED_IWARP_QP_STATE_CLOSING: /* could happen due to race... do nothing.... 
*/ break; default: rc = -EINVAL; } break; case QED_IWARP_QP_STATE_TERMINATE: case QED_IWARP_QP_STATE_CLOSING: qp->iwarp_state = new_state; break; default: break; } DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n", qp->icid, iwarp_state_names[prev_iw_state], iwarp_state_names[qp->iwarp_state], internal ? "internal" : ""); spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock); if (modify_fw) rc = qed_iwarp_modify_fw(p_hwfn, qp); return rc; } int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; int rc; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qp->icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, IWARP_RAMROD_CMD_ID_DESTROY_QP, p_hwfn->p_rdma_info->proto, &init_data); if (rc) return rc; rc = qed_spq_post(p_hwfn, p_ent, NULL); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc); return rc; } static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep, bool remove_from_active_list) { dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*ep->ep_buffer_virt), ep->ep_buffer_virt, ep->ep_buffer_phys); if (remove_from_active_list) { spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); list_del(&ep->list_entry); spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); } if (ep->qp) ep->qp->ep = NULL; kfree(ep); } int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { struct qed_iwarp_ep *ep = qp->ep; int wait_count = 0; int rc = 0; if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) { rc = qed_iwarp_modify_qp(p_hwfn, qp, QED_IWARP_QP_STATE_ERROR, false); if (rc) return rc; } /* Make sure ep is closed before returning and freeing memory. 
*/ if (ep) { while (READ_ONCE(ep->state) != QED_IWARP_EP_CLOSED && wait_count++ < 200) msleep(100); if (ep->state != QED_IWARP_EP_CLOSED) DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n", ep->state); qed_iwarp_destroy_ep(p_hwfn, ep, false); } rc = qed_iwarp_fw_destroy(p_hwfn, qp); if (qp->shared_queue) dma_free_coherent(&p_hwfn->cdev->pdev->dev, IWARP_SHARED_QUEUE_PAGE_SIZE, qp->shared_queue, qp->shared_queue_phys_addr); return rc; } static int qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out) { struct qed_iwarp_ep *ep; int rc; ep = kzalloc(sizeof(*ep), GFP_KERNEL); if (!ep) return -ENOMEM; ep->state = QED_IWARP_EP_INIT; ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*ep->ep_buffer_virt), &ep->ep_buffer_phys, GFP_KERNEL); if (!ep->ep_buffer_virt) { rc = -ENOMEM; goto err; } ep->sig = QED_EP_SIG; *ep_out = ep; return 0; err: kfree(ep); return rc; } static void qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn, struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n", p_tcp_ramrod->tcp.local_mac_addr_lo, p_tcp_ramrod->tcp.local_mac_addr_mid, p_tcp_ramrod->tcp.local_mac_addr_hi, p_tcp_ramrod->tcp.remote_mac_addr_lo, p_tcp_ramrod->tcp.remote_mac_addr_mid, p_tcp_ramrod->tcp.remote_mac_addr_hi); if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n", p_tcp_ramrod->tcp.local_ip, p_tcp_ramrod->tcp.local_port, p_tcp_ramrod->tcp.remote_ip, p_tcp_ramrod->tcp.remote_port, p_tcp_ramrod->tcp.vlan_id); } else { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n", p_tcp_ramrod->tcp.local_ip, p_tcp_ramrod->tcp.local_port, p_tcp_ramrod->tcp.remote_ip, p_tcp_ramrod->tcp.remote_port, p_tcp_ramrod->tcp.vlan_id); } DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n", p_tcp_ramrod->tcp.flow_label, p_tcp_ramrod->tcp.ttl, p_tcp_ramrod->tcp.tos_or_tc, p_tcp_ramrod->tcp.mss, p_tcp_ramrod->tcp.rcv_wnd_scale, p_tcp_ramrod->tcp.connect_mode, p_tcp_ramrod->tcp.flags); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n", p_tcp_ramrod->tcp.syn_ip_payload_length, p_tcp_ramrod->tcp.syn_phy_addr_lo, p_tcp_ramrod->tcp.syn_phy_addr_hi); } static int qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) { struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod; struct tcp_offload_params_opt2 *tcp; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; dma_addr_t async_output_phys; dma_addr_t in_pdata_phys; u16 physical_q; u16 flags = 0; u8 tcp_flags; int rc; int i; memset(&init_data, 0, sizeof(init_data)); init_data.cid = ep->tcp_cid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; if (ep->connect_mode == TCP_CONNECT_PASSIVE) init_data.comp_mode = QED_SPQ_MODE_CB; else init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, IWARP_RAMROD_CMD_ID_TCP_OFFLOAD, PROTOCOLID_IWARP, &init_data); if (rc) return rc; p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload; in_pdata_phys = ep->ep_buffer_phys + offsetof(struct qed_iwarp_ep_memory, in_pdata); DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr, in_pdata_phys); p_tcp_ramrod->iwarp.incoming_ulp_buffer.len = cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata)); async_output_phys = ep->ep_buffer_phys + offsetof(struct 
					  qed_iwarp_ep_memory, async_output);

	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf,
		       async_output_phys);

	p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
	p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep));

	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q);
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
	p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q);
	p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;

	tcp = &p_tcp_ramrod->tcp;
	qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi,
			    &tcp->remote_mac_addr_mid,
			    &tcp->remote_mac_addr_lo, ep->remote_mac_addr);
	qed_set_fw_mac_addr(&tcp->local_mac_addr_hi,
			    &tcp->local_mac_addr_mid,
			    &tcp->local_mac_addr_lo, ep->local_mac_addr);

	tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);

	tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;

	SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
		  !!(tcp_flags & QED_IWARP_TS_EN));

	SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
		  !!(tcp_flags & QED_IWARP_DA_EN));

	tcp->flags = cpu_to_le16(flags);
	tcp->ip_version = ep->cm_info.ip_version;

	for (i = 0; i < 4; i++) {
		tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]);
		tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]);
	}

	tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port);
	tcp->local_port = cpu_to_le16(ep->cm_info.local_port);
	tcp->mss = cpu_to_le16(ep->mss);
	tcp->flow_label = 0;
	tcp->ttl = 0x40;
	tcp->tos_or_tc = 0;
	tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
	tcp->cwnd = cpu_to_le32(QED_IWARP_DEF_CWND_FACTOR * ep->mss);
	tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
	tcp->ka_timeout = cpu_to_le32(QED_IWARP_DEF_KA_TIMEOUT);
	tcp->ka_interval = cpu_to_le32(QED_IWARP_DEF_KA_INTERVAL);

	tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
	tcp->connect_mode = ep->connect_mode;

	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		tcp->syn_ip_payload_length =
			cpu_to_le16(ep->syn_ip_payload_length);
		tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr);
		tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr);
	}

	qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc);

	return rc;
}

static void
qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	struct qed_iwarp_cm_event_params params;
	struct mpa_v2_hdr *mpa_v2;
	union async_output *async_data;
	u16 mpa_ord, mpa_ird;
	u8 mpa_hdr_size = 0;
	u16 ulp_data_len;
	u8 mpa_rev;

	async_data = &ep->ep_buffer_virt->async_output;

	mpa_rev = async_data->mpa_request.mpa_handshake_mode;
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
		   async_data->mpa_request.ulp_data_len,
		   mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata)));

	if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
		/* Read ord/ird values from private data buffer */
		mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata;
		mpa_hdr_size = sizeof(*mpa_v2);

		mpa_ord = ntohs(mpa_v2->ord);
		mpa_ird = ntohs(mpa_v2->ird);

		/* Temporarily store the incoming ord/ird requested in cm_info;
		 * they are replaced with the negotiated values during accept.
		 */
		ep->cm_info.ord = (u8)min_t(u16,
					    (mpa_ord & MPA_V2_IRD_ORD_MASK),
					    QED_IWARP_ORD_DEFAULT);

		ep->cm_info.ird = (u8)min_t(u16,
					    (mpa_ird & MPA_V2_IRD_ORD_MASK),
					    QED_IWARP_IRD_DEFAULT);

		/* Peer2Peer negotiation */
		ep->rtr_type = MPA_RTR_TYPE_NONE;
		if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
			if (mpa_ord &
MPA_V2_WRITE_RTR) ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE; if (mpa_ord & MPA_V2_READ_RTR) ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ; if (mpa_ird & MPA_V2_SEND_RTR) ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND; ep->rtr_type &= iwarp_info->rtr_type; /* if we're left with no match send our capabilities */ if (ep->rtr_type == MPA_RTR_TYPE_NONE) ep->rtr_type = iwarp_info->rtr_type; } ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED; } else { ep->cm_info.ord = QED_IWARP_ORD_DEFAULT; ep->cm_info.ird = QED_IWARP_IRD_DEFAULT; ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC; } DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n", mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type, async_data->mpa_request.ulp_data_len, mpa_hdr_size); /* Strip mpa v2 hdr from private data before sending to upper layer */ ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size; ulp_data_len = le16_to_cpu(async_data->mpa_request.ulp_data_len); ep->cm_info.private_data_len = ulp_data_len - mpa_hdr_size; params.event = QED_IWARP_EVENT_MPA_REQUEST; params.cm_info = &ep->cm_info; params.ep_context = ep; params.status = 0; ep->state = QED_IWARP_EP_MPA_REQ_RCVD; ep->event_cb(ep->cb_context, &params); } static int qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) { struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod; struct mpa_outgoing_params *common; struct qed_iwarp_info *iwarp_info; struct qed_sp_init_data init_data; dma_addr_t async_output_phys; struct qed_spq_entry *p_ent; dma_addr_t out_pdata_phys; dma_addr_t in_pdata_phys; struct qed_rdma_qp *qp; bool reject; u32 val; int rc; if (!ep) return -EINVAL; qp = ep->qp; reject = !qp; memset(&init_data, 0, sizeof(init_data)); init_data.cid = reject ? 
ep->tcp_cid : qp->icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; if (ep->connect_mode == TCP_CONNECT_ACTIVE) init_data.comp_mode = QED_SPQ_MODE_CB; else init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, IWARP_RAMROD_CMD_ID_MPA_OFFLOAD, PROTOCOLID_IWARP, &init_data); if (rc) return rc; p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload; common = &p_mpa_ramrod->common; out_pdata_phys = ep->ep_buffer_phys + offsetof(struct qed_iwarp_ep_memory, out_pdata); DMA_REGPAIR_LE(common->outgoing_ulp_buffer.addr, out_pdata_phys); val = ep->cm_info.private_data_len; common->outgoing_ulp_buffer.len = cpu_to_le16(val); common->crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed; common->out_rq.ord = cpu_to_le32(ep->cm_info.ord); common->out_rq.ird = cpu_to_le32(ep->cm_info.ird); val = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid; p_mpa_ramrod->tcp_cid = cpu_to_le32(val); in_pdata_phys = ep->ep_buffer_phys + offsetof(struct qed_iwarp_ep_memory, in_pdata); p_mpa_ramrod->tcp_connect_side = ep->connect_mode; DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr, in_pdata_phys); p_mpa_ramrod->incoming_ulp_buffer.len = cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata)); async_output_phys = ep->ep_buffer_phys + offsetof(struct qed_iwarp_ep_memory, async_output); DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf, async_output_phys); p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep)); p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep)); if (!reject) { DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr, qp->shared_queue_phys_addr); p_mpa_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue; } else { common->reject = 1; } iwarp_info = &p_hwfn->p_rdma_info->iwarp; p_mpa_ramrod->rcv_wnd = cpu_to_le16(iwarp_info->rcv_wnd_size); p_mpa_ramrod->mode = ep->mpa_rev; SET_FIELD(p_mpa_ramrod->rtr_pref, IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type); ep->state = QED_IWARP_EP_MPA_OFFLOADED; rc = qed_spq_post(p_hwfn, p_ent, NULL); if (!reject) ep->cid = qp->icid; /* Now they're migrated. */ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n", reject ? 
0xffff : qp->icid, ep->tcp_cid, rc, ep->cm_info.ird, ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject); return rc; } static void qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) { ep->state = QED_IWARP_EP_INIT; if (ep->qp) ep->qp->ep = NULL; ep->qp = NULL; memset(&ep->cm_info, 0, sizeof(ep->cm_info)); if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) { /* We don't care about the return code, it's ok if tcp_cid * remains invalid...in this case we'll defer allocation */ qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid); } spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); list_move_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_free_list); spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); } static void qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) { struct mpa_v2_hdr *mpa_v2_params; union async_output *async_data; u16 mpa_ird, mpa_ord; u8 mpa_data_size = 0; u16 ulp_data_len; if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) { mpa_v2_params = (struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata); mpa_data_size = sizeof(*mpa_v2_params); mpa_ird = ntohs(mpa_v2_params->ird); mpa_ord = ntohs(mpa_v2_params->ord); ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK); ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK); } async_data = &ep->ep_buffer_virt->async_output; ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size; ulp_data_len = le16_to_cpu(async_data->mpa_response.ulp_data_len); ep->cm_info.private_data_len = ulp_data_len - mpa_data_size; } static void qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) { struct qed_iwarp_cm_event_params params; if (ep->connect_mode == TCP_CONNECT_PASSIVE) { DP_NOTICE(p_hwfn, "MPA reply event not expected on passive side!\n"); return; } params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY; qed_iwarp_parse_private_data(p_hwfn, ep); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n", ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird); params.cm_info = &ep->cm_info; params.ep_context = ep; params.status = 0; ep->mpa_reply_processed = true; ep->event_cb(ep->cb_context, &params); } #define QED_IWARP_CONNECT_MODE_STRING(ep) \ ((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? 
"Passive" : "Active" /* Called as a result of the event: * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE */ static void qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep, u8 fw_return_code) { struct qed_iwarp_cm_event_params params; if (ep->connect_mode == TCP_CONNECT_ACTIVE) params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE; else params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE; if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed) qed_iwarp_parse_private_data(p_hwfn, ep); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n", ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird); params.cm_info = &ep->cm_info; params.ep_context = ep; switch (fw_return_code) { case RDMA_RETURN_OK: ep->qp->max_rd_atomic_req = ep->cm_info.ord; ep->qp->max_rd_atomic_resp = ep->cm_info.ird; qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1); ep->state = QED_IWARP_EP_ESTABLISHED; params.status = 0; break; case IWARP_CONN_ERROR_MPA_TIMEOUT: DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n", QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); params.status = -EBUSY; break; case IWARP_CONN_ERROR_MPA_ERROR_REJECT: DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n", QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); params.status = -ECONNREFUSED; break; case IWARP_CONN_ERROR_MPA_RST: DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n", QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid, ep->tcp_cid); params.status = -ECONNRESET; break; case IWARP_CONN_ERROR_MPA_FIN: DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n", QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); params.status = -ECONNREFUSED; break; case IWARP_CONN_ERROR_MPA_INSUF_IRD: DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n", QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); params.status = -ECONNREFUSED; break; case IWARP_CONN_ERROR_MPA_RTR_MISMATCH: DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n", QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); params.status = -ECONNREFUSED; break; case IWARP_CONN_ERROR_MPA_INVALID_PACKET: DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n", QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); params.status = -ECONNREFUSED; break; case IWARP_CONN_ERROR_MPA_LOCAL_ERROR: DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n", QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); params.status = -ECONNREFUSED; break; case IWARP_CONN_ERROR_MPA_TERMINATE: DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n", QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); params.status = -ECONNREFUSED; break; default: params.status = -ECONNRESET; break; } if (fw_return_code != RDMA_RETURN_OK) /* paired with READ_ONCE in destroy_qp */ smp_store_release(&ep->state, QED_IWARP_EP_CLOSED); ep->event_cb(ep->cb_context, &params); /* on passive side, if there is no associated QP (REJECT) we need to * return the ep to the pool, (in the regular case we add an element * in accept instead of this one. * In both cases we need to remove it from the ep_list. 
*/ if (fw_return_code != RDMA_RETURN_OK) { ep->tcp_cid = QED_IWARP_INVALID_TCP_CID; if ((ep->connect_mode == TCP_CONNECT_PASSIVE) && (!ep->qp)) { /* Rejected */ qed_iwarp_return_ep(p_hwfn, ep); } else { spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); list_del(&ep->list_entry); spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); } } } static void qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep, u8 *mpa_data_size) { struct mpa_v2_hdr *mpa_v2_params; u16 mpa_ird, mpa_ord; *mpa_data_size = 0; if (MPA_REV2(ep->mpa_rev)) { mpa_v2_params = (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata; *mpa_data_size = sizeof(*mpa_v2_params); mpa_ird = (u16)ep->cm_info.ird; mpa_ord = (u16)ep->cm_info.ord; if (ep->rtr_type != MPA_RTR_TYPE_NONE) { mpa_ird |= MPA_V2_PEER2PEER_MODEL; if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND) mpa_ird |= MPA_V2_SEND_RTR; if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE) mpa_ord |= MPA_V2_WRITE_RTR; if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) mpa_ord |= MPA_V2_READ_RTR; } mpa_v2_params->ird = htons(mpa_ird); mpa_v2_params->ord = htons(mpa_ord); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n", mpa_v2_params->ird, mpa_v2_params->ord, *((u32 *)mpa_v2_params), mpa_ord & MPA_V2_IRD_ORD_MASK, mpa_ird & MPA_V2_IRD_ORD_MASK, !!(mpa_ird & MPA_V2_PEER2PEER_MODEL), !!(mpa_ird & MPA_V2_SEND_RTR), !!(mpa_ord & MPA_V2_WRITE_RTR), !!(mpa_ord & MPA_V2_READ_RTR)); } } int qed_iwarp_connect(void *rdma_cxt, struct qed_iwarp_connect_in *iparams, struct qed_iwarp_connect_out *oparams) { struct qed_hwfn *p_hwfn = rdma_cxt; struct qed_iwarp_info *iwarp_info; struct qed_iwarp_ep *ep; u8 mpa_data_size = 0; u32 cid; int rc; if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) || (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) { DP_NOTICE(p_hwfn, "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n", iparams->qp->icid, iparams->cm_info.ord, iparams->cm_info.ird); return -EINVAL; } iwarp_info = &p_hwfn->p_rdma_info->iwarp; /* Allocate ep object */ rc = qed_iwarp_alloc_cid(p_hwfn, &cid); if (rc) return rc; rc = qed_iwarp_create_ep(p_hwfn, &ep); if (rc) goto err; ep->tcp_cid = cid; spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list); spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); ep->qp = iparams->qp; ep->qp->ep = ep; ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr); ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr); memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info)); ep->cm_info.ord = iparams->cm_info.ord; ep->cm_info.ird = iparams->cm_info.ird; ep->rtr_type = iwarp_info->rtr_type; if (!iwarp_info->peer2peer) ep->rtr_type = MPA_RTR_TYPE_NONE; if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0)) ep->cm_info.ord = 1; ep->mpa_rev = iwarp_info->mpa_rev; qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size); ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata; ep->cm_info.private_data_len = iparams->cm_info.private_data_len + mpa_data_size; memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, iparams->cm_info.private_data, iparams->cm_info.private_data_len); ep->mss = iparams->mss; ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss); ep->event_cb = iparams->event_cb; ep->cb_context = iparams->cb_context; ep->connect_mode = TCP_CONNECT_ACTIVE; oparams->ep_context = ep; rc = qed_iwarp_tcp_offload(p_hwfn, ep); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = 
%d\n", iparams->qp->icid, ep->tcp_cid, rc); if (rc) { qed_iwarp_destroy_ep(p_hwfn, ep, true); goto err; } return rc; err: qed_iwarp_cid_cleaned(p_hwfn, cid); return rc; } static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn) { struct qed_iwarp_ep *ep = NULL; int rc; spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) { DP_ERR(p_hwfn, "Ep list is empty\n"); goto out; } ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list, struct qed_iwarp_ep, list_entry); /* in some cases we could have failed allocating a tcp cid when added * from accept / failure... retry now..this is not the common case. */ if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) { rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid); /* if we fail we could look for another entry with a valid * tcp_cid, but since we don't expect to reach this anyway * it's not worth the handling */ if (rc) { ep->tcp_cid = QED_IWARP_INVALID_TCP_CID; ep = NULL; goto out; } } list_del(&ep->list_entry); out: spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); return ep; } #define QED_IWARP_MAX_CID_CLEAN_TIME 100 #define QED_IWARP_MAX_NO_PROGRESS_CNT 5 /* This function waits for all the bits of a bmap to be cleared, as long as * there is progress ( i.e. the number of bits left to be cleared decreases ) * the function continues. */ static int qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap) { int prev_weight = 0; int wait_count = 0; int weight = 0; weight = bitmap_weight(bmap->bitmap, bmap->max_count); prev_weight = weight; while (weight) { /* If the HW device is during recovery, all resources are * immediately reset without receiving a per-cid indication * from HW. In this case we don't expect the cid_map to be * cleared. */ if (p_hwfn->cdev->recov_in_prog) return 0; msleep(QED_IWARP_MAX_CID_CLEAN_TIME); weight = bitmap_weight(bmap->bitmap, bmap->max_count); if (prev_weight == weight) { wait_count++; } else { prev_weight = weight; wait_count = 0; } if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) { DP_NOTICE(p_hwfn, "%s bitmap wait timed out (%d cids pending)\n", bmap->name, weight); return -EBUSY; } } return 0; } static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn) { int rc; int i; rc = qed_iwarp_wait_cid_map_cleared(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map); if (rc) return rc; /* Now free the tcp cids from the main cid map */ for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++) qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i); /* Now wait for all cids to be completed */ return qed_iwarp_wait_cid_map_cleared(p_hwfn, &p_hwfn->p_rdma_info->cid_map); } static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn) { struct qed_iwarp_ep *ep; while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) { spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list, struct qed_iwarp_ep, list_entry); if (!ep) { spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); break; } list_del(&ep->list_entry); spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID) qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid); qed_iwarp_destroy_ep(p_hwfn, ep, false); } } static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init) { struct qed_iwarp_ep *ep; int rc = 0; int count; u32 cid; int i; count = init ? 
QED_IWARP_PREALLOC_CNT : 1; for (i = 0; i < count; i++) { rc = qed_iwarp_create_ep(p_hwfn, &ep); if (rc) return rc; /* During initialization we allocate from the main pool, * afterwards we allocate only from the tcp_cid. */ if (init) { rc = qed_iwarp_alloc_cid(p_hwfn, &cid); if (rc) goto err; qed_iwarp_set_tcp_cid(p_hwfn, cid); } else { /* We don't care about the return code, it's ok if * tcp_cid remains invalid...in this case we'll * defer allocation */ qed_iwarp_alloc_tcp_cid(p_hwfn, &cid); } ep->tcp_cid = cid; spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_free_list); spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); } return rc; err: qed_iwarp_destroy_ep(p_hwfn, ep, false); return rc; } int qed_iwarp_alloc(struct qed_hwfn *p_hwfn) { int rc; /* Allocate bitmap for tcp cid. These are used by passive side * to ensure it can allocate a tcp cid during dpc that was * pre-acquired and doesn't require dynamic allocation of ilt */ rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, QED_IWARP_PREALLOC_CNT, "TCP_CID"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to allocate tcp cid, rc = %d\n", rc); return rc; } INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list); spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock); rc = qed_iwarp_prealloc_ep(p_hwfn, true); if (rc) return rc; return qed_ooo_alloc(p_hwfn); } void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn) { struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; qed_ooo_free(p_hwfn); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1); kfree(iwarp_info->mpa_bufs); kfree(iwarp_info->partial_fpdus); kfree(iwarp_info->mpa_intermediate_buf); } int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams) { struct qed_hwfn *p_hwfn = rdma_cxt; struct qed_iwarp_ep *ep; u8 mpa_data_size = 0; int rc; ep = iparams->ep_context; if (!ep) { DP_ERR(p_hwfn, "Ep Context receive in accept is NULL\n"); return -EINVAL; } DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n", iparams->qp->icid, ep->tcp_cid); if ((iparams->ord > QED_IWARP_ORD_DEFAULT) || (iparams->ird > QED_IWARP_IRD_DEFAULT)) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n", iparams->qp->icid, ep->tcp_cid, iparams->ord, iparams->ord); return -EINVAL; } qed_iwarp_prealloc_ep(p_hwfn, false); ep->cb_context = iparams->cb_context; ep->qp = iparams->qp; ep->qp->ep = ep; if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) { /* Negotiate ord/ird: if upperlayer requested ord larger than * ird advertised by remote, we need to decrease our ord */ if (iparams->ord > ep->cm_info.ird) iparams->ord = ep->cm_info.ird; if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (iparams->ird == 0)) iparams->ird = 1; } /* Update cm_info ord/ird to be negotiated values */ ep->cm_info.ord = iparams->ord; ep->cm_info.ird = iparams->ird; qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size); ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata; ep->cm_info.private_data_len = iparams->private_data_len + mpa_data_size; memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, iparams->private_data, iparams->private_data_len); rc = qed_iwarp_mpa_offload(p_hwfn, ep); if (rc) qed_iwarp_modify_qp(p_hwfn, iparams->qp, QED_IWARP_QP_STATE_ERROR, 1); return rc; } int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams) { struct qed_hwfn *p_hwfn = rdma_cxt; struct qed_iwarp_ep *ep; u8 mpa_data_size = 0; ep = iparams->ep_context; if (!ep) { 
DP_ERR(p_hwfn, "Ep Context receive in reject is NULL\n"); return -EINVAL; } DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid); ep->cb_context = iparams->cb_context; ep->qp = NULL; qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size); ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata; ep->cm_info.private_data_len = iparams->private_data_len + mpa_data_size; memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, iparams->private_data, iparams->private_data_len); return qed_iwarp_mpa_offload(p_hwfn, ep); } static void qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n", cm_info->ip_version); if (cm_info->ip_version == QED_TCP_IPV4) DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n", cm_info->remote_ip, cm_info->remote_port, cm_info->local_ip, cm_info->local_port, cm_info->vlan); else DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n", cm_info->remote_ip, cm_info->remote_port, cm_info->local_ip, cm_info->local_port, cm_info->vlan); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "private_data_len = %x ord = %d, ird = %d\n", cm_info->private_data_len, cm_info->ord, cm_info->ird); } static int qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn, struct qed_iwarp_ll2_buff *buf, u8 handle) { int rc; rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr, (u16)buf->buff_size, buf, 1); if (rc) { DP_NOTICE(p_hwfn, "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n", rc, handle); dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size, buf->data, buf->data_phys_addr); kfree(buf); } return rc; } static bool qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info) { struct qed_iwarp_ep *ep = NULL; bool found = false; list_for_each_entry(ep, &p_hwfn->p_rdma_info->iwarp.ep_list, list_entry) { if ((ep->cm_info.local_port == cm_info->local_port) && (ep->cm_info.remote_port == cm_info->remote_port) && (ep->cm_info.vlan == cm_info->vlan) && !memcmp(&ep->cm_info.local_ip, cm_info->local_ip, sizeof(cm_info->local_ip)) && !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip, sizeof(cm_info->remote_ip))) { found = true; break; } } if (found) { DP_NOTICE(p_hwfn, "SYN received on active connection - dropping\n"); qed_iwarp_print_cm_info(p_hwfn, cm_info); return true; } return false; } static struct qed_iwarp_listener * qed_iwarp_get_listener(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info) { struct qed_iwarp_listener *listener = NULL; static const u32 ip_zero[4] = { 0, 0, 0, 0 }; bool found = false; list_for_each_entry(listener, &p_hwfn->p_rdma_info->iwarp.listen_list, list_entry) { if (listener->port == cm_info->local_port) { if (!memcmp(listener->ip_addr, ip_zero, sizeof(ip_zero))) { found = true; break; } if (!memcmp(listener->ip_addr, cm_info->local_ip, sizeof(cm_info->local_ip)) && (listener->vlan == cm_info->vlan)) { found = true; break; } } } if (found) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n", listener); return listener; } DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n"); return NULL; } static int qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info, void *buf, u8 *remote_mac_addr, u8 *local_mac_addr, int *payload_len, int *tcp_start_offset) { struct vlan_ethhdr *vethh; bool vlan_valid = false; struct ipv6hdr *ip6h; struct ethhdr *ethh; struct tcphdr *tcph; struct iphdr *iph; int eth_hlen; int ip_hlen; int eth_type; int i; ethh = buf; eth_type = 
ntohs(ethh->h_proto); if (eth_type == ETH_P_8021Q) { vlan_valid = true; vethh = (struct vlan_ethhdr *)ethh; cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK; eth_type = ntohs(vethh->h_vlan_encapsulated_proto); } eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0); if (!ether_addr_equal(ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr)) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Got unexpected mac %pM instead of %pM\n", ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr); return -EINVAL; } ether_addr_copy(remote_mac_addr, ethh->h_source); ether_addr_copy(local_mac_addr, ethh->h_dest); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n", eth_type, ethh->h_source); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n", eth_hlen, ethh->h_dest); iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen); if (eth_type == ETH_P_IP) { if (iph->protocol != IPPROTO_TCP) { DP_NOTICE(p_hwfn, "Unexpected ip protocol on ll2 %x\n", iph->protocol); return -EINVAL; } cm_info->local_ip[0] = ntohl(iph->daddr); cm_info->remote_ip[0] = ntohl(iph->saddr); cm_info->ip_version = QED_TCP_IPV4; ip_hlen = (iph->ihl) * sizeof(u32); *payload_len = ntohs(iph->tot_len) - ip_hlen; } else if (eth_type == ETH_P_IPV6) { ip6h = (struct ipv6hdr *)iph; if (ip6h->nexthdr != IPPROTO_TCP) { DP_NOTICE(p_hwfn, "Unexpected ip protocol on ll2 %x\n", iph->protocol); return -EINVAL; } for (i = 0; i < 4; i++) { cm_info->local_ip[i] = ntohl(ip6h->daddr.in6_u.u6_addr32[i]); cm_info->remote_ip[i] = ntohl(ip6h->saddr.in6_u.u6_addr32[i]); } cm_info->ip_version = QED_TCP_IPV6; ip_hlen = sizeof(*ip6h); *payload_len = ntohs(ip6h->payload_len); } else { DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type); return -EINVAL; } tcph = (struct tcphdr *)((u8 *)iph + ip_hlen); if (!tcph->syn) { DP_NOTICE(p_hwfn, "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n", iph->ihl, tcph->source, tcph->dest); return -EINVAL; } cm_info->local_port = ntohs(tcph->dest); cm_info->remote_port = ntohs(tcph->source); qed_iwarp_print_cm_info(p_hwfn, cm_info); *tcp_start_offset = eth_hlen + ip_hlen; return 0; } static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn, u16 cid) { struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; struct qed_iwarp_fpdu *partial_fpdu; u32 idx; idx = cid - qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP); if (idx >= iwarp_info->max_num_partial_fpdus) { DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid, iwarp_info->max_num_partial_fpdus); return NULL; } partial_fpdu = &iwarp_info->partial_fpdus[idx]; return partial_fpdu; } enum qed_iwarp_mpa_pkt_type { QED_IWARP_MPA_PKT_PACKED, QED_IWARP_MPA_PKT_PARTIAL, QED_IWARP_MPA_PKT_UNALIGNED }; #define QED_IWARP_INVALID_FPDU_LENGTH 0xffff #define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2) #define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4) /* Pad to multiple of 4 */ #define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) ALIGN(data_len, 4) #define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len) \ (QED_IWARP_PDU_DATA_LEN_WITH_PAD((_mpa_len) + \ QED_IWARP_MPA_FPDU_LENGTH_SIZE) + \ QED_IWARP_MPA_CRC32_DIGEST_SIZE) /* fpdu can be fragmented over maximum 3 bds: header, partial mpa, unaligned */ #define QED_IWARP_MAX_BDS_PER_FPDU 3 static const char * const pkt_type_str[] = { "QED_IWARP_MPA_PKT_PACKED", "QED_IWARP_MPA_PKT_PARTIAL", "QED_IWARP_MPA_PKT_UNALIGNED" }; static int qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu, struct qed_iwarp_ll2_buff *buf); static enum qed_iwarp_mpa_pkt_type 
qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu, u16 tcp_payload_len, u8 *mpa_data) { enum qed_iwarp_mpa_pkt_type pkt_type; u16 mpa_len; if (fpdu->incomplete_bytes) { pkt_type = QED_IWARP_MPA_PKT_UNALIGNED; goto out; } /* special case of one byte remaining... * lower byte will be read next packet */ if (tcp_payload_len == 1) { fpdu->fpdu_length = *mpa_data << BITS_PER_BYTE; pkt_type = QED_IWARP_MPA_PKT_PARTIAL; goto out; } mpa_len = ntohs(*(__force __be16 *)mpa_data); fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len); if (fpdu->fpdu_length <= tcp_payload_len) pkt_type = QED_IWARP_MPA_PKT_PACKED; else pkt_type = QED_IWARP_MPA_PKT_PARTIAL; out: DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n", pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len); return pkt_type; } static void qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf, struct qed_iwarp_fpdu *fpdu, struct unaligned_opaque_data *pkt_data, u16 tcp_payload_size, u8 placement_offset) { u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset); fpdu->mpa_buf = buf; fpdu->pkt_hdr = buf->data_phys_addr + placement_offset; fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset; fpdu->mpa_frag = buf->data_phys_addr + first_mpa_offset; fpdu->mpa_frag_virt = (u8 *)(buf->data) + first_mpa_offset; if (tcp_payload_size == 1) fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH; else if (tcp_payload_size < fpdu->fpdu_length) fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size; else fpdu->incomplete_bytes = 0; /* complete fpdu */ fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes; } static int qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu, struct unaligned_opaque_data *pkt_data, struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size) { u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset); u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf; int rc; /* need to copy the data from the partial packet stored in fpdu * to the new buf, for this we also need to move the data currently * placed on the buf. The assumption is that the buffer is big enough * since fpdu_length <= mss, we use an intermediate buffer since * we may need to copy the new data to an overlapping location */ if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) { DP_ERR(p_hwfn, "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n", buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size, fpdu->incomplete_bytes); return -EINVAL; } DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n", fpdu->mpa_frag_virt, fpdu->mpa_frag_len, (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size); memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len); memcpy(tmp_buf + fpdu->mpa_frag_len, (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size); rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf); if (rc) return rc; /* If we managed to post the buffer copy the data to the new buffer * o/w this will occur in the next round... 
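	 * (a failure to recycle is returned to qed_iwarp_process_mpa_pkt(),
	 * which leaves this packet to be handled on a later pass -- see the
	 * "packet will be re-processed later" note at the call site)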
 */
	memcpy((u8 *)(buf->data), tmp_buf,
	       fpdu->mpa_frag_len + tcp_payload_size);

	fpdu->mpa_buf = buf;
	/* fpdu->pkt_hdr remains as is */
	/* fpdu->mpa_frag is overridden with new buf */
	fpdu->mpa_frag = buf->data_phys_addr;
	fpdu->mpa_frag_virt = buf->data;
	fpdu->mpa_frag_len += tcp_payload_size;
	fpdu->incomplete_bytes -= tcp_payload_size;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
		   buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size,
		   fpdu->incomplete_bytes);

	return 0;
}

static void
qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
			     struct qed_iwarp_fpdu *fpdu, u8 *mpa_data)
{
	u16 mpa_len;

	/* Update incomplete packets if needed */
	if (fpdu->incomplete_bytes == QED_IWARP_INVALID_FPDU_LENGTH) {
		/* Missing lower byte is now available */
		mpa_len = fpdu->fpdu_length | *mpa_data;
		fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
		/* one byte of hdr */
		fpdu->mpa_frag_len = 1;
		fpdu->incomplete_bytes = fpdu->fpdu_length - 1;

		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n",
			   mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes);
	}
}

#define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt) \
	(GET_FIELD((_curr_pkt)->flags, \
		   UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE))

/* This function is used to recycle a buffer using the ll2 drop option. It
 * uses the mechanism to ensure that all buffers posted to tx before this one
 * were completed. The buffer sent here will be sent as a cookie in the tx
 * completion function and can then be reposted to rx chain when done. The flow
 * that requires this is the flow where a FPDU splits over more than 3 tcp
 * segments. In this case the driver needs to re-post a rx buffer instead of
 * the one received, but driver can't simply repost a buffer it copied from
 * as there is a case where the buffer was originally a packed FPDU, and is
 * partially posted to FW. Driver needs to ensure FW is done with it.
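 * The drop completion arrives in qed_iwarp_ll2_comp_tx_pkt(), which re-posts
 * the cookie buffer to the rx chain and then processes the pending-packet
 * list again.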
*/ static int qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu, struct qed_iwarp_ll2_buff *buf) { struct qed_ll2_tx_pkt_info tx_pkt; u8 ll2_handle; int rc; memset(&tx_pkt, 0, sizeof(tx_pkt)); tx_pkt.num_of_bds = 1; tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP; tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; tx_pkt.first_frag = fpdu->pkt_hdr; tx_pkt.first_frag_len = fpdu->pkt_hdr_size; buf->piggy_buf = NULL; tx_pkt.cookie = buf; ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true); if (rc) DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Can't drop packet rc=%d\n", rc); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n", (unsigned long int)tx_pkt.first_frag, tx_pkt.first_frag_len, buf, rc); return rc; } static int qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu) { struct qed_ll2_tx_pkt_info tx_pkt; u8 ll2_handle; int rc; memset(&tx_pkt, 0, sizeof(tx_pkt)); tx_pkt.num_of_bds = 1; tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; tx_pkt.first_frag = fpdu->pkt_hdr; tx_pkt.first_frag_len = fpdu->pkt_hdr_size; tx_pkt.enable_ip_cksum = true; tx_pkt.enable_l4_cksum = true; tx_pkt.calc_ip_len = true; /* vlan overload with enum iwarp_ll2_tx_queues */ tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE; ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true); if (rc) DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Can't send right edge rc=%d\n", rc); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n", tx_pkt.num_of_bds, (unsigned long int)tx_pkt.first_frag, tx_pkt.first_frag_len, rc); return rc; } static int qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu, struct unaligned_opaque_data *curr_pkt, struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type) { struct qed_ll2_tx_pkt_info tx_pkt; u16 first_mpa_offset; u8 ll2_handle; int rc; memset(&tx_pkt, 0, sizeof(tx_pkt)); /* An unaligned packet means it's split over two tcp segments. So the * complete packet requires 3 bds, one for the header, one for the * part of the fpdu of the first tcp segment, and the last fragment * will point to the remainder of the fpdu. A packed pdu, requires only * two bds, one for the header and one for the data. */ tx_pkt.num_of_bds = (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2; tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; /* offset in words */ /* Send the mpa_buf only with the last fpdu (in case of packed) */ if (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED || tcp_payload_size <= fpdu->fpdu_length) tx_pkt.cookie = fpdu->mpa_buf; tx_pkt.first_frag = fpdu->pkt_hdr; tx_pkt.first_frag_len = fpdu->pkt_hdr_size; tx_pkt.enable_ip_cksum = true; tx_pkt.enable_l4_cksum = true; tx_pkt.calc_ip_len = true; /* vlan overload with enum iwarp_ll2_tx_queues */ tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE; /* special case of unaligned packet and not packed, need to send * both buffers as cookie to release. 
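	 * (the second buffer rides along as mpa_buf->piggy_buf and is
	 * re-posted to rx from the tx completion handler together with the
	 * primary cookie)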
*/ if (tcp_payload_size == fpdu->incomplete_bytes) fpdu->mpa_buf->piggy_buf = buf; ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; /* Set first fragment to header */ rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true); if (rc) goto out; /* Set second fragment to first part of packet */ rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle, fpdu->mpa_frag, fpdu->mpa_frag_len); if (rc) goto out; if (!fpdu->incomplete_bytes) goto out; first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset); /* Set third fragment to second part of the packet */ rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle, buf->data_phys_addr + first_mpa_offset, fpdu->incomplete_bytes); out: DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "MPA_ALIGN: Sent FPDU num_bds=%d first_frag_len=%x, mpa_frag_len=0x%x, incomplete_bytes:0x%x rc=%d\n", tx_pkt.num_of_bds, tx_pkt.first_frag_len, fpdu->mpa_frag_len, fpdu->incomplete_bytes, rc); return rc; } static void qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn, struct unaligned_opaque_data *curr_pkt, u32 opaque_data0, u32 opaque_data1) { u64 opaque_data; opaque_data = HILO_64(cpu_to_le32(opaque_data1), cpu_to_le32(opaque_data0)); *curr_pkt = *((struct unaligned_opaque_data *)&opaque_data); le16_add_cpu(&curr_pkt->first_mpa_offset, curr_pkt->tcp_payload_offset); } /* This function is called when an unaligned or incomplete MPA packet arrives * driver needs to align the packet, perhaps using previous data and send * it down to FW once it is aligned. */ static int qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn, struct qed_iwarp_ll2_mpa_buf *mpa_buf) { struct unaligned_opaque_data *curr_pkt = &mpa_buf->data; struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf; enum qed_iwarp_mpa_pkt_type pkt_type; struct qed_iwarp_fpdu *fpdu; u16 cid, first_mpa_offset; int rc = -EINVAL; u8 *mpa_data; cid = le32_to_cpu(curr_pkt->cid); fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid); if (!fpdu) { /* something corrupt with cid, post rx back */ DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n", cid); goto err; } do { first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset); mpa_data = ((u8 *)(buf->data) + first_mpa_offset); pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu, mpa_buf->tcp_payload_len, mpa_data); switch (pkt_type) { case QED_IWARP_MPA_PKT_PARTIAL: qed_iwarp_init_fpdu(buf, fpdu, curr_pkt, mpa_buf->tcp_payload_len, mpa_buf->placement_offset); if (!QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) { mpa_buf->tcp_payload_len = 0; break; } rc = qed_iwarp_win_right_edge(p_hwfn, fpdu); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Can't send FPDU:reset rc=%d\n", rc); memset(fpdu, 0, sizeof(*fpdu)); break; } mpa_buf->tcp_payload_len = 0; break; case QED_IWARP_MPA_PKT_PACKED: qed_iwarp_init_fpdu(buf, fpdu, curr_pkt, mpa_buf->tcp_payload_len, mpa_buf->placement_offset); rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf, mpa_buf->tcp_payload_len, pkt_type); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Can't send FPDU:reset rc=%d\n", rc); memset(fpdu, 0, sizeof(*fpdu)); break; } mpa_buf->tcp_payload_len -= fpdu->fpdu_length; le16_add_cpu(&curr_pkt->first_mpa_offset, fpdu->fpdu_length); break; case QED_IWARP_MPA_PKT_UNALIGNED: qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data); if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) { /* special handling of fpdu split over more * than 2 segments */ if (QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) { rc = qed_iwarp_win_right_edge(p_hwfn, fpdu); /* packet will be re-processed later */ if (rc) return rc; } rc = qed_iwarp_cp_pkt(p_hwfn, fpdu, 
curr_pkt, buf, mpa_buf->tcp_payload_len); if (rc) /* packet will be re-processed later */ return rc; mpa_buf->tcp_payload_len = 0; break; } rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf, mpa_buf->tcp_payload_len, pkt_type); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Can't send FPDU:delay rc=%d\n", rc); /* don't reset fpdu -> we need it for next * classify */ break; } mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes; le16_add_cpu(&curr_pkt->first_mpa_offset, fpdu->incomplete_bytes); /* The framed PDU was sent - no more incomplete bytes */ fpdu->incomplete_bytes = 0; break; } } while (mpa_buf->tcp_payload_len && !rc); return rc; err: qed_iwarp_ll2_post_rx(p_hwfn, buf, p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle); return rc; } static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn) { struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; struct qed_iwarp_ll2_mpa_buf *mpa_buf = NULL; int rc; while (!list_empty(&iwarp_info->mpa_buf_pending_list)) { mpa_buf = list_first_entry(&iwarp_info->mpa_buf_pending_list, struct qed_iwarp_ll2_mpa_buf, list_entry); rc = qed_iwarp_process_mpa_pkt(p_hwfn, mpa_buf); /* busy means break and continue processing later, don't * remove the buf from the pending list. */ if (rc == -EBUSY) break; list_move_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_list); if (rc) { /* different error, don't continue */ DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc); break; } } } static void qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) { struct qed_iwarp_ll2_mpa_buf *mpa_buf; struct qed_iwarp_info *iwarp_info; struct qed_hwfn *p_hwfn = cxt; u16 first_mpa_offset; iwarp_info = &p_hwfn->p_rdma_info->iwarp; mpa_buf = list_first_entry(&iwarp_info->mpa_buf_list, struct qed_iwarp_ll2_mpa_buf, list_entry); if (!mpa_buf) { DP_ERR(p_hwfn, "No free mpa buf\n"); goto err; } list_del(&mpa_buf->list_entry); qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data, data->opaque_data_0, data->opaque_data_1); first_mpa_offset = le16_to_cpu(mpa_buf->data.first_mpa_offset); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n", data->length.packet_length, first_mpa_offset, mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags, mpa_buf->data.cid); mpa_buf->ll2_buf = data->cookie; mpa_buf->tcp_payload_len = data->length.packet_length - first_mpa_offset; first_mpa_offset += data->u.placement_offset; mpa_buf->data.first_mpa_offset = cpu_to_le16(first_mpa_offset); mpa_buf->placement_offset = data->u.placement_offset; list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list); qed_iwarp_process_pending_pkts(p_hwfn); return; err: qed_iwarp_ll2_post_rx(p_hwfn, data->cookie, iwarp_info->ll2_mpa_handle); } static void qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) { struct qed_iwarp_ll2_buff *buf = data->cookie; struct qed_iwarp_listener *listener; struct qed_ll2_tx_pkt_info tx_pkt; struct qed_iwarp_cm_info cm_info; struct qed_hwfn *p_hwfn = cxt; u8 remote_mac_addr[ETH_ALEN]; u8 local_mac_addr[ETH_ALEN]; struct qed_iwarp_ep *ep; int tcp_start_offset; u8 ll2_syn_handle; int payload_len; u32 hdr_size; int rc; memset(&cm_info, 0, sizeof(cm_info)); ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle; /* Check if packet was received with errors... 
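	 * Both an error flag reported by the ll2 completion and a
	 * calculated-but-failed L4 checksum cause the SYN to be dropped and
	 * its buffer re-posted to rx.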
 */
	if (data->err_flags) {
		DP_NOTICE(p_hwfn, "Error received on SYN packet: 0x%x\n",
			  data->err_flags);
		goto err;
	}

	if (GET_FIELD(data->parse_flags,
		      PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
	    GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
		DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n");
		goto err;
	}

	rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) +
				    data->u.placement_offset, remote_mac_addr,
				    local_mac_addr, &payload_len,
				    &tcp_start_offset);
	if (rc)
		goto err;

	/* Check if there is a listener for this 4-tuple+vlan */
	listener = qed_iwarp_get_listener(p_hwfn, &cm_info);
	if (!listener) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "SYN received on tuple not listened on parse_flags=%d packet len=%d\n",
			   data->parse_flags, data->length.packet_length);

		memset(&tx_pkt, 0, sizeof(tx_pkt));
		tx_pkt.num_of_bds = 1;
		tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
		tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
		tx_pkt.first_frag = buf->data_phys_addr +
				    data->u.placement_offset;
		tx_pkt.first_frag_len = data->length.packet_length;
		tx_pkt.cookie = buf;

		rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle,
					       &tx_pkt, true);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Can't post SYN back to chip rc=%d\n", rc);
			goto err;
		}

		return;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n");

	/* There may be an open ep on this connection if this is a syn
	 * retransmit... need to make sure there isn't...
	 */
	if (qed_iwarp_ep_exists(p_hwfn, &cm_info))
		goto err;

	ep = qed_iwarp_get_free_ep(p_hwfn);
	if (!ep)
		goto err;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	ether_addr_copy(ep->remote_mac_addr, remote_mac_addr);
	ether_addr_copy(ep->local_mac_addr, local_mac_addr);

	memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));

	hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60);
	ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);

	ep->event_cb = listener->event_cb;
	ep->cb_context = listener->cb_context;
	ep->connect_mode = TCP_CONNECT_PASSIVE;

	ep->syn = buf;
	ep->syn_ip_payload_length = (u16)payload_len;
	ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
			   tcp_start_offset;

	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
	if (rc) {
		qed_iwarp_return_ep(p_hwfn, ep);
		goto err;
	}

	return;
err:
	qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
}

static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
				     void *cookie, dma_addr_t rx_buf_addr,
				     bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buffer = cookie;
	struct qed_hwfn *p_hwfn = cxt;

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
			  buffer->data, buffer->data_phys_addr);
	kfree(buffer);
}

static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
				      void *cookie, dma_addr_t first_frag_addr,
				      bool b_last_fragment, bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buffer = cookie;
	struct qed_iwarp_ll2_buff *piggy;
	struct qed_hwfn *p_hwfn = cxt;

	if (!buffer)		/* can happen in packed mpa unaligned...
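				 * (for a packed FPDU that is not the last one
				 * in the segment, qed_iwarp_send_fpdu() leaves
				 * tx_pkt.cookie NULL)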
*/ return; /* this was originally an rx packet, post it back */ piggy = buffer->piggy_buf; if (piggy) { buffer->piggy_buf = NULL; qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle); } qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle); if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle) qed_iwarp_process_pending_pkts(p_hwfn); return; } static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle, void *cookie, dma_addr_t first_frag_addr, bool b_last_fragment, bool b_last_packet) { struct qed_iwarp_ll2_buff *buffer = cookie; struct qed_hwfn *p_hwfn = cxt; if (!buffer) return; if (buffer->piggy_buf) { dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->piggy_buf->buff_size, buffer->piggy_buf->data, buffer->piggy_buf->data_phys_addr); kfree(buffer->piggy_buf); } dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size, buffer->data, buffer->data_phys_addr); kfree(buffer); } /* The only slowpath for iwarp ll2 is unalign flush. When this completion * is received, need to reset the FPDU. */ static void qed_iwarp_ll2_slowpath(void *cxt, u8 connection_handle, u32 opaque_data_0, u32 opaque_data_1) { struct unaligned_opaque_data unalign_data; struct qed_hwfn *p_hwfn = cxt; struct qed_iwarp_fpdu *fpdu; u32 cid; qed_iwarp_mpa_get_data(p_hwfn, &unalign_data, opaque_data_0, opaque_data_1); cid = le32_to_cpu(unalign_data.cid); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n", cid); fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid); if (fpdu) memset(fpdu, 0, sizeof(*fpdu)); } static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn) { struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; int rc = 0; if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) { rc = qed_ll2_terminate_connection(p_hwfn, iwarp_info->ll2_syn_handle); if (rc) DP_INFO(p_hwfn, "Failed to terminate syn connection\n"); qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle); iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL; } if (iwarp_info->ll2_ooo_handle != QED_IWARP_HANDLE_INVAL) { rc = qed_ll2_terminate_connection(p_hwfn, iwarp_info->ll2_ooo_handle); if (rc) DP_INFO(p_hwfn, "Failed to terminate ooo connection\n"); qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_ooo_handle); iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL; } if (iwarp_info->ll2_mpa_handle != QED_IWARP_HANDLE_INVAL) { rc = qed_ll2_terminate_connection(p_hwfn, iwarp_info->ll2_mpa_handle); if (rc) DP_INFO(p_hwfn, "Failed to terminate mpa connection\n"); qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_mpa_handle); iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL; } qed_llh_remove_mac_filter(p_hwfn->cdev, 0, p_hwfn->p_rdma_info->iwarp.mac_addr); return rc; } static int qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn, int num_rx_bufs, int buff_size, u8 ll2_handle) { struct qed_iwarp_ll2_buff *buffer; int rc = 0; int i; for (i = 0; i < num_rx_bufs; i++) { buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); if (!buffer) { rc = -ENOMEM; break; } buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, buff_size, &buffer->data_phys_addr, GFP_KERNEL); if (!buffer->data) { kfree(buffer); rc = -ENOMEM; break; } buffer->buff_size = buff_size; rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle); if (rc) /* buffers will be deallocated by qed_ll2 */ break; } return rc; } #define QED_IWARP_MAX_BUF_SIZE(mtu) \ ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \ ETH_CACHE_LINE_SIZE) static int qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, struct qed_rdma_start_in_params *params, u32 
rcv_wnd_size) { struct qed_iwarp_info *iwarp_info; struct qed_ll2_acquire_data data; struct qed_ll2_cbs cbs; u32 buff_size; u16 n_ooo_bufs; int rc = 0; int i; iwarp_info = &p_hwfn->p_rdma_info->iwarp; iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL; iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL; iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL; iwarp_info->max_mtu = params->max_mtu; ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr); rc = qed_llh_add_mac_filter(p_hwfn->cdev, 0, params->mac_addr); if (rc) return rc; /* Start SYN connection */ cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt; cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt; cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt; cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt; cbs.slowpath_cb = NULL; cbs.cookie = p_hwfn; memset(&data, 0, sizeof(data)); data.input.conn_type = QED_LL2_TYPE_IWARP; /* SYN will use ctx based queues */ data.input.rx_conn_type = QED_LL2_RX_TYPE_CTX; data.input.mtu = params->max_mtu; data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE; data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE; data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ data.input.tx_tc = PKT_LB_TC; data.input.tx_dest = QED_LL2_TX_DEST_LB; data.p_connection_handle = &iwarp_info->ll2_syn_handle; data.cbs = &cbs; rc = qed_ll2_acquire_connection(p_hwfn, &data); if (rc) { DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n"); qed_llh_remove_mac_filter(p_hwfn->cdev, 0, params->mac_addr); return rc; } rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle); if (rc) { DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n"); goto err; } buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, QED_IWARP_LL2_SYN_RX_SIZE, buff_size, iwarp_info->ll2_syn_handle); if (rc) goto err; /* Start OOO connection */ data.input.conn_type = QED_LL2_TYPE_OOO; /* OOO/unaligned will use legacy ll2 queues (ram based) */ data.input.rx_conn_type = QED_LL2_RX_TYPE_LEGACY; data.input.mtu = params->max_mtu; n_ooo_bufs = (QED_IWARP_MAX_OOO * rcv_wnd_size) / iwarp_info->max_mtu; n_ooo_bufs = min_t(u32, n_ooo_bufs, QED_IWARP_LL2_OOO_MAX_RX_SIZE); data.input.rx_num_desc = n_ooo_bufs; data.input.rx_num_ooo_buffers = n_ooo_bufs; data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ data.input.tx_num_desc = QED_IWARP_LL2_OOO_DEF_TX_SIZE; data.p_connection_handle = &iwarp_info->ll2_ooo_handle; rc = qed_ll2_acquire_connection(p_hwfn, &data); if (rc) goto err; rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle); if (rc) goto err; /* Start Unaligned MPA connection */ cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt; cbs.slowpath_cb = qed_iwarp_ll2_slowpath; memset(&data, 0, sizeof(data)); data.input.conn_type = QED_LL2_TYPE_IWARP; data.input.mtu = params->max_mtu; /* FW requires that once a packet arrives OOO, it must have at * least 2 rx buffers available on the unaligned connection * for handling the case that it is a partial fpdu. 
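	 * This is why rx_num_desc for the unaligned MPA connection below is
	 * sized to twice the number of OOO rx buffers.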
*/ data.input.rx_num_desc = n_ooo_bufs * 2; data.input.tx_num_desc = data.input.rx_num_desc; data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU; data.input.tx_tc = PKT_LB_TC; data.input.tx_dest = QED_LL2_TX_DEST_LB; data.p_connection_handle = &iwarp_info->ll2_mpa_handle; data.input.secondary_queue = true; data.cbs = &cbs; rc = qed_ll2_acquire_connection(p_hwfn, &data); if (rc) goto err; rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle); if (rc) goto err; rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, data.input.rx_num_desc, buff_size, iwarp_info->ll2_mpa_handle); if (rc) goto err; iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps, sizeof(*iwarp_info->partial_fpdus), GFP_KERNEL); if (!iwarp_info->partial_fpdus) { rc = -ENOMEM; goto err; } iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL); if (!iwarp_info->mpa_intermediate_buf) { rc = -ENOMEM; goto err; } /* The mpa_bufs array serves for pending RX packets received on the * mpa ll2 that don't have place on the tx ring and require later * processing. We can't fail on allocation of such a struct therefore * we allocate enough to take care of all rx packets */ iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc, sizeof(*iwarp_info->mpa_bufs), GFP_KERNEL); if (!iwarp_info->mpa_bufs) { rc = -ENOMEM; goto err; } INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list); INIT_LIST_HEAD(&iwarp_info->mpa_buf_list); for (i = 0; i < data.input.rx_num_desc; i++) list_add_tail(&iwarp_info->mpa_bufs[i].list_entry, &iwarp_info->mpa_buf_list); return rc; err: qed_iwarp_ll2_stop(p_hwfn); return rc; } static struct { u32 two_ports; u32 four_ports; } qed_iwarp_rcv_wnd_size[MAX_CHIP_IDS] = { {QED_IWARP_RCV_WND_SIZE_DEF_BB_2P, QED_IWARP_RCV_WND_SIZE_DEF_BB_4P}, {QED_IWARP_RCV_WND_SIZE_DEF_AH_2P, QED_IWARP_RCV_WND_SIZE_DEF_AH_4P} }; int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_rdma_start_in_params *params) { struct qed_dev *cdev = p_hwfn->cdev; struct qed_iwarp_info *iwarp_info; enum chip_ids chip_id; u32 rcv_wnd_size; iwarp_info = &p_hwfn->p_rdma_info->iwarp; iwarp_info->tcp_flags = QED_IWARP_TS_EN; chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2; rcv_wnd_size = (qed_device_num_ports(cdev) == 4) ? 
qed_iwarp_rcv_wnd_size[chip_id].four_ports : qed_iwarp_rcv_wnd_size[chip_id].two_ports; /* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */ iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) - ilog2(QED_IWARP_RCV_WND_SIZE_MIN); iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale; iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED; iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED; iwarp_info->peer2peer = QED_IWARP_PARAM_P2P; iwarp_info->rtr_type = MPA_RTR_TYPE_ZERO_SEND | MPA_RTR_TYPE_ZERO_WRITE | MPA_RTR_TYPE_ZERO_READ; spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock); INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list); INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list); qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP, qed_iwarp_async_event); qed_ooo_setup(p_hwfn); return qed_iwarp_ll2_start(p_hwfn, params, rcv_wnd_size); } int qed_iwarp_stop(struct qed_hwfn *p_hwfn) { int rc; qed_iwarp_free_prealloc_ep(p_hwfn); rc = qed_iwarp_wait_for_all_cids(p_hwfn); if (rc) return rc; return qed_iwarp_ll2_stop(p_hwfn); } static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep, u8 fw_return_code) { struct qed_iwarp_cm_event_params params; qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true); params.event = QED_IWARP_EVENT_CLOSE; params.ep_context = ep; params.cm_info = &ep->cm_info; params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ? 0 : -ECONNRESET; /* paired with READ_ONCE in destroy_qp */ smp_store_release(&ep->state, QED_IWARP_EP_CLOSED); spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); list_del(&ep->list_entry); spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); ep->event_cb(ep->cb_context, &params); } static void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep, int fw_ret_code) { struct qed_iwarp_cm_event_params params; bool event_cb = false; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n", ep->cid, fw_ret_code); switch (fw_ret_code) { case IWARP_EXCEPTION_DETECTED_LLP_CLOSED: params.status = 0; params.event = QED_IWARP_EVENT_DISCONNECT; event_cb = true; break; case IWARP_EXCEPTION_DETECTED_LLP_RESET: params.status = -ECONNRESET; params.event = QED_IWARP_EVENT_DISCONNECT; event_cb = true; break; case IWARP_EXCEPTION_DETECTED_RQ_EMPTY: params.event = QED_IWARP_EVENT_RQ_EMPTY; event_cb = true; break; case IWARP_EXCEPTION_DETECTED_IRQ_FULL: params.event = QED_IWARP_EVENT_IRQ_FULL; event_cb = true; break; case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT: params.event = QED_IWARP_EVENT_LLP_TIMEOUT; event_cb = true; break; case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR: params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR; event_cb = true; break; case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW: params.event = QED_IWARP_EVENT_CQ_OVERFLOW; event_cb = true; break; case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC: params.event = QED_IWARP_EVENT_QP_CATASTROPHIC; event_cb = true; break; case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR: params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR; event_cb = true; break; case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR: params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR; event_cb = true; break; case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED: params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED; event_cb = true; break; default: DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Unhandled exception received...fw_ret_code=%d\n", fw_ret_code); break; } if (event_cb) { params.ep_context = ep; params.cm_info = &ep->cm_info; ep->event_cb(ep->cb_context, 
&params); } } static void qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep, u8 fw_return_code) { struct qed_iwarp_cm_event_params params; memset(&params, 0, sizeof(params)); params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE; params.ep_context = ep; params.cm_info = &ep->cm_info; /* paired with READ_ONCE in destroy_qp */ smp_store_release(&ep->state, QED_IWARP_EP_CLOSED); switch (fw_return_code) { case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET: DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s(0x%x) TCP connect got invalid packet\n", QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); params.status = -ECONNRESET; break; case IWARP_CONN_ERROR_TCP_CONNECTION_RST: DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s(0x%x) TCP Connection Reset\n", QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); params.status = -ECONNRESET; break; case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT: DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n", QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); params.status = -EBUSY; break; case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER: DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n", QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); params.status = -ECONNREFUSED; break; case IWARP_CONN_ERROR_MPA_INVALID_PACKET: DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n", QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); params.status = -ECONNRESET; break; default: DP_ERR(p_hwfn, "%s(0x%x) Unexpected return code tcp connect: %d\n", QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid, fw_return_code); params.status = -ECONNRESET; break; } if (ep->connect_mode == TCP_CONNECT_PASSIVE) { ep->tcp_cid = QED_IWARP_INVALID_TCP_CID; qed_iwarp_return_ep(p_hwfn, ep); } else { ep->event_cb(ep->cb_context, &params); spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); list_del(&ep->list_entry); spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); } } static void qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep, u8 fw_return_code) { u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle; if (ep->connect_mode == TCP_CONNECT_PASSIVE) { /* Done with the SYN packet, post back to ll2 rx */ qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle); ep->syn = NULL; /* If connect failed - upper layer doesn't know about it */ if (fw_return_code == RDMA_RETURN_OK) qed_iwarp_mpa_received(p_hwfn, ep); else qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep, fw_return_code); } else { if (fw_return_code == RDMA_RETURN_OK) qed_iwarp_mpa_offload(p_hwfn, ep); else qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep, fw_return_code); } } static inline bool qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) { if (!ep || (ep->sig != QED_EP_SIG)) { DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep); return false; } return true; } static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, __le16 echo, union event_ring_data *data, u8 fw_return_code) { struct qed_rdma_events events = p_hwfn->p_rdma_info->events; struct regpair *fw_handle = &data->rdma_data.async_handle; struct qed_iwarp_ep *ep = NULL; u16 srq_offset; u16 srq_id; u16 cid; ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi, fw_handle->lo); switch (fw_event_code) { case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE: /* Async completion after TCP 3-way handshake */ if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) return -EINVAL; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n", ep->tcp_cid, fw_return_code); qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code); break; case 
IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED: if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) return -EINVAL; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n", ep->cid, fw_return_code); qed_iwarp_exception_received(p_hwfn, ep, fw_return_code); break; case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE: /* Async completion for Close Connection ramrod */ if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) return -EINVAL; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n", ep->cid, fw_return_code); qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code); break; case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED: /* Async event for active side only */ if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) return -EINVAL; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n", ep->cid, fw_return_code); qed_iwarp_mpa_reply_arrived(p_hwfn, ep); break; case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE: if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) return -EINVAL; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n", ep->cid, fw_return_code); qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code); break; case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED: cid = (u16)le32_to_cpu(fw_handle->lo); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid); qed_iwarp_cid_cleaned(p_hwfn, cid); break; case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY: DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n"); srq_offset = p_hwfn->p_rdma_info->srq_id_offset; /* FW assigns value that is no greater than u16 */ srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset; events.affiliated_event(events.context, QED_IWARP_EVENT_SRQ_EMPTY, &srq_id); break; case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT: DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n"); srq_offset = p_hwfn->p_rdma_info->srq_id_offset; /* FW assigns value that is no greater than u16 */ srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset; events.affiliated_event(events.context, QED_IWARP_EVENT_SRQ_LIMIT, &srq_id); break; case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW: DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n"); p_hwfn->p_rdma_info->events.affiliated_event( p_hwfn->p_rdma_info->events.context, QED_IWARP_EVENT_CQ_OVERFLOW, (void *)fw_handle); break; default: DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n", fw_event_code); return -EINVAL; } return 0; } int qed_iwarp_create_listen(void *rdma_cxt, struct qed_iwarp_listen_in *iparams, struct qed_iwarp_listen_out *oparams) { struct qed_hwfn *p_hwfn = rdma_cxt; struct qed_iwarp_listener *listener; listener = kzalloc(sizeof(*listener), GFP_KERNEL); if (!listener) return -ENOMEM; listener->ip_version = iparams->ip_version; memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr)); listener->port = iparams->port; listener->vlan = iparams->vlan; listener->event_cb = iparams->event_cb; listener->cb_context = iparams->cb_context; listener->max_backlog = iparams->max_backlog; oparams->handle = listener; spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); list_add_tail(&listener->list_entry, &p_hwfn->p_rdma_info->iwarp.listen_list); spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n", listener->event_cb, listener, listener->ip_addr[0], listener->ip_addr[1], listener->ip_addr[2], listener->ip_addr[3], listener->port, 
listener->vlan); return 0; } int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle) { struct qed_iwarp_listener *listener = handle; struct qed_hwfn *p_hwfn = rdma_cxt; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle); spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); list_del(&listener->list_entry); spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); kfree(listener); return 0; } int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams) { struct qed_hwfn *p_hwfn = rdma_cxt; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; struct qed_iwarp_ep *ep; struct qed_rdma_qp *qp; int rc; ep = iparams->ep_context; if (!ep) { DP_ERR(p_hwfn, "Ep Context receive in send_rtr is NULL\n"); return -EINVAL; } qp = ep->qp; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n", qp->icid, ep->tcp_cid); memset(&init_data, 0, sizeof(init_data)); init_data.cid = qp->icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_CB; rc = qed_sp_init_request(p_hwfn, &p_ent, IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR, PROTOCOLID_IWARP, &init_data); if (rc) return rc; rc = qed_spq_post(p_hwfn, p_ent, NULL); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc); return rc; } void qed_iwarp_query_qp(struct qed_rdma_qp *qp, struct qed_rdma_query_qp_out_params *out_params) { out_params->state = qed_iwarp2roce_state(qp->iwarp_state); }
linux-master
drivers/net/ethernet/qlogic/qed/qed_iwarp.c
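The iWARP setup code above derives a TCP-style window scale from the chosen receive window: rcv_wnd_scale = ilog2(rcv_wnd_size) - ilog2(QED_IWARP_RCV_WND_SIZE_MIN), and the advertised window is the full size shifted right by that scale. The short standalone sketch below only illustrates that arithmetic; it is not part of the driver, the 64 KB minimum is an assumed placeholder (the real QED_IWARP_RCV_WND_SIZE_MIN is defined elsewhere in the driver), and ilog2_u32() is a local stand-in for the kernel's ilog2().

/*
 * Standalone sketch of the receive-window scaling done in qed_iwarp_setup():
 * a window size is expressed as an (advertised window, shift count) pair
 * relative to a minimum window.  Values below are illustrative only.
 */
#include <stdio.h>

#define RCV_WND_SIZE_MIN (64 * 1024)	/* assumed placeholder, see note above */

/* Minimal stand-in for the kernel's ilog2(): floor(log2(v)). */
static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int log = 0;

	while (v >>= 1)
		log++;
	return log;
}

int main(void)
{
	unsigned int rcv_wnd_size = 1024 * 1024;	/* arbitrary example window */
	unsigned int scale = ilog2_u32(rcv_wnd_size) - ilog2_u32(RCV_WND_SIZE_MIN);
	unsigned int advertised = rcv_wnd_size >> scale;

	/* For power-of-two sizes, advertised << scale reproduces the full window. */
	printf("scale=%u advertised=%u reconstructed=%u\n",
	       scale, advertised, advertised << scale);
	return 0;
}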
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/crc32.h> #include <linux/etherdevice.h> #include "qed.h" #include "qed_sriov.h" #include "qed_vf.h" static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; void *p_tlv; /* This lock is released when we receive PF's response * in qed_send_msg2pf(). * So, qed_vf_pf_prep() and qed_send_msg2pf() * must come in sequence. */ mutex_lock(&(p_iov->mutex)); DP_VERBOSE(p_hwfn, QED_MSG_IOV, "preparing to send 0x%04x tlv over vf pf channel\n", type); /* Reset Request offset */ p_iov->offset = (u8 *)p_iov->vf2pf_request; /* Clear mailbox - both request and reply */ memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs)); memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs)); /* Init type and length */ p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length); /* Init first tlv header */ ((struct vfpf_first_tlv *)p_tlv)->reply_address = (u64)p_iov->pf2vf_reply_phys; return p_tlv; } static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status) { union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF request status = 0x%x, PF reply status = 0x%x\n", req_status, resp->default_resp.hdr.status); mutex_unlock(&(p_hwfn->vf_iov_info->mutex)); } #define QED_VF_CHANNEL_USLEEP_ITERATIONS 90 #define QED_VF_CHANNEL_USLEEP_DELAY 100 #define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10 #define QED_VF_CHANNEL_MSLEEP_DELAY 25 static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done) { union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request; struct ustorm_trigger_vf_zone trigger; struct ustorm_vf_zone *zone_data; int iter, rc = 0; zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B; /* output tlvs list */ qed_dp_tlv_list(p_hwfn, p_req); /* Send TLVs over HW channel */ memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone)); trigger.vf_pf_msg_valid = 1; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n", GET_FIELD(p_hwfn->hw_info.concrete_fid, PXP_CONCRETE_FID_PFID), upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys), lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys), &zone_data->non_trigger.vf_pf_msg_addr, *((u32 *)&trigger), &zone_data->trigger); REG_WR(p_hwfn, (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo, lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys)); REG_WR(p_hwfn, (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi, upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys)); /* The message data must be written first, to prevent trigger before * data is written. */ wmb(); REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger)); /* When PF would be done with the response, it would write back to the * `done' address from a coherent DMA zone. Poll until then. 
*/ iter = QED_VF_CHANNEL_USLEEP_ITERATIONS; while (!*done && iter--) { udelay(QED_VF_CHANNEL_USLEEP_DELAY); dma_rmb(); } iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS; while (!*done && iter--) { msleep(QED_VF_CHANNEL_MSLEEP_DELAY); dma_rmb(); } if (!*done) { DP_NOTICE(p_hwfn, "VF <-- PF Timeout [Type %d]\n", p_req->first_tlv.tl.type); rc = -EBUSY; } else { if ((*done != PFVF_STATUS_SUCCESS) && (*done != PFVF_STATUS_NO_RESOURCE)) DP_NOTICE(p_hwfn, "PF response: %d [Type %d]\n", *done, p_req->first_tlv.tl.type); else DP_VERBOSE(p_hwfn, QED_MSG_IOV, "PF response: %d [Type %d]\n", *done, p_req->first_tlv.tl.type); } return rc; } static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct vfpf_qid_tlv *p_qid_tlv; /* Only add QIDs for the queue if it was negotiated with PF */ if (!(p_iov->acquire_resp.pfdev_info.capabilities & PFVF_ACQUIRE_CAP_QUEUE_QIDS)) return; p_qid_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_QID, sizeof(*p_qid_tlv)); p_qid_tlv->qid = p_cid->qid_usage_idx; } static int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_def_resp_tlv *resp; struct vfpf_first_tlv *req; u32 size; int rc; /* clear mailbox and prep first tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req)); /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); resp = &p_iov->pf2vf_reply->default_resp; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS) rc = -EAGAIN; qed_vf_pf_req_end(p_hwfn, rc); if (!b_final) return rc; p_hwfn->b_int_enabled = 0; if (p_iov->vf2pf_request) dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(union vfpf_tlvs), p_iov->vf2pf_request, p_iov->vf2pf_request_phys); if (p_iov->pf2vf_reply) dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(union pfvf_tlvs), p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys); if (p_iov->bulletin.p_virt) { size = sizeof(struct qed_bulletin_content); dma_free_coherent(&p_hwfn->cdev->pdev->dev, size, p_iov->bulletin.p_virt, p_iov->bulletin.phys); } kfree(p_hwfn->vf_iov_info); p_hwfn->vf_iov_info = NULL; return rc; } int qed_vf_pf_release(struct qed_hwfn *p_hwfn) { return _qed_vf_pf_release(p_hwfn, true); } #define VF_ACQUIRE_THRESH 3 static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn, struct vf_pf_resc_request *p_req, struct pf_vf_resc *p_resp) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. 
Try PF recommended amount\n", p_req->num_rxqs, p_resp->num_rxqs, p_req->num_rxqs, p_resp->num_txqs, p_req->num_sbs, p_resp->num_sbs, p_req->num_mac_filters, p_resp->num_mac_filters, p_req->num_vlan_filters, p_resp->num_vlan_filters, p_req->num_mc_filters, p_resp->num_mc_filters, p_req->num_cids, p_resp->num_cids); /* humble our request */ p_req->num_txqs = p_resp->num_txqs; p_req->num_rxqs = p_resp->num_rxqs; p_req->num_sbs = p_resp->num_sbs; p_req->num_mac_filters = p_resp->num_mac_filters; p_req->num_vlan_filters = p_resp->num_vlan_filters; p_req->num_mc_filters = p_resp->num_mc_filters; p_req->num_cids = p_resp->num_cids; } static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp; struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; struct vf_pf_resc_request *p_resc; u8 retry_cnt = VF_ACQUIRE_THRESH; bool resources_acquired = false; struct vfpf_acquire_tlv *req; int rc = 0, attempts = 0; /* clear mailbox and prep first tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req)); p_resc = &req->resc_request; /* starting filling the request */ req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid; p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF; p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF; p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF; p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS; p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; p_resc->num_cids = QED_ETH_VF_DEFAULT_NUM_CIDS; req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX; req->vfdev_info.fw_major = FW_MAJOR_VERSION; req->vfdev_info.fw_minor = FW_MINOR_VERSION; req->vfdev_info.fw_revision = FW_REVISION_VERSION; req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION; req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR; req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR; /* Fill capability field with any non-deprecated config we support */ req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G; /* If we've mapped the doorbell bar, try using queue qids */ if (p_iov->b_doorbell_bar) { req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR | VFPF_ACQUIRE_CAP_QUEUE_QIDS; p_resc->num_cids = QED_ETH_VF_MAX_NUM_CIDS; } /* pf 2 vf bulletin board address */ req->bulletin_addr = p_iov->bulletin.phys; req->bulletin_size = p_iov->bulletin.size; /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); while (!resources_acquired) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "attempting to acquire resources\n"); /* Clear response buffer, as this might be a re-send */ memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs)); /* send acquire request */ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); /* Re-try acquire in case of vf-pf hw channel timeout */ if (retry_cnt && rc == -EBUSY) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF retrying to acquire due to VPC timeout\n"); retry_cnt--; continue; } if (rc) goto exit; /* copy acquire response from buffer to p_hwfn */ memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp)); attempts++; if (resp->hdr.status == PFVF_STATUS_SUCCESS) { /* PF agrees to allocate our resources */ if (!(resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) { /* It's possible legacy PF mistakenly accepted; * but we don't care - simply mark it as * legacy and continue. 
*/ req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PRE_FP_HSI; } DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n"); resources_acquired = true; } else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE && attempts < VF_ACQUIRE_THRESH) { qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc, &resp->resc); } else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) { if (pfdev_info->major_fp_hsi && (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) { DP_NOTICE(p_hwfn, "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n", pfdev_info->major_fp_hsi, pfdev_info->minor_fp_hsi, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR, pfdev_info->major_fp_hsi); rc = -EINVAL; goto exit; } if (!pfdev_info->major_fp_hsi) { if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI) { DP_NOTICE(p_hwfn, "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n"); rc = -EINVAL; goto exit; } else { DP_INFO(p_hwfn, "PF is old - try re-acquire to see if it supports FW-version override\n"); req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PRE_FP_HSI; continue; } } /* If PF/VF are using same Major, PF must have had * it's reasons. Simply fail. */ DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n"); rc = -EINVAL; goto exit; } else { DP_ERR(p_hwfn, "PF returned error %d to VF acquisition request\n", resp->hdr.status); rc = -EAGAIN; goto exit; } } /* Mark the PF as legacy, if needed */ if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI) p_iov->b_pre_fp_hsi = true; /* In case PF doesn't support multi-queue Tx, update the number of * CIDs to reflect the number of queues [older PFs didn't fill that * field]. */ if (!(resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_QUEUE_QIDS)) resp->resc.num_cids = resp->resc.num_rxqs + resp->resc.num_txqs; /* Update bulletin board size with response from PF */ p_iov->bulletin.size = resp->bulletin_size; /* get HW info */ p_hwfn->cdev->type = resp->pfdev_info.dev_type; p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev; p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff; /* Learn of the possibility of CMT */ if (IS_LEAD_HWFN(p_hwfn)) { if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) { DP_NOTICE(p_hwfn, "100g VF\n"); p_hwfn->cdev->num_hwfns = 2; } } if (!p_iov->b_pre_fp_hsi && (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) { DP_INFO(p_hwfn, "PF is using older fastpath HSI; %02x.%02x is configured\n", ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi); } exit: qed_vf_pf_req_end(p_hwfn, rc); return rc; } u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id) { u32 bar_size; /* Regview size is fixed */ if (bar_id == BAR_ID_0) return 1 << 17; /* Doorbell is received from PF */ bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size; if (bar_size) return 1 << bar_size; return 0; } int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) { struct qed_hwfn *p_lead = QED_LEADING_HWFN(p_hwfn->cdev); struct qed_vf_iov *p_iov; u32 reg; int rc; /* Set number of hwfns - might be overridden once leading hwfn learns * actual configuration from PF. 
*/ if (IS_LEAD_HWFN(p_hwfn)) p_hwfn->cdev->num_hwfns = 1; reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS; p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg); reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS; p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg); /* Allocate vf sriov info */ p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL); if (!p_iov) return -ENOMEM; /* Doorbells are tricky; Upper-layer has alreday set the hwfn doorbell * value, but there are several incompatibily scenarios where that * would be incorrect and we'd need to override it. */ if (!p_hwfn->doorbells) { p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview + PXP_VF_BAR0_START_DQ; } else if (p_hwfn == p_lead) { /* For leading hw-function, value is always correct, but need * to handle scenario where legacy PF would not support 100g * mapped bars later. */ p_iov->b_doorbell_bar = true; } else { /* here, value would be correct ONLY if the leading hwfn * received indication that mapped-bars are supported. */ if (p_lead->vf_iov_info->b_doorbell_bar) p_iov->b_doorbell_bar = true; else p_hwfn->doorbells = (u8 __iomem *) p_hwfn->regview + PXP_VF_BAR0_START_DQ; } /* Allocate vf2pf msg */ p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(union vfpf_tlvs), &p_iov->vf2pf_request_phys, GFP_KERNEL); if (!p_iov->vf2pf_request) goto free_p_iov; p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(union pfvf_tlvs), &p_iov->pf2vf_reply_phys, GFP_KERNEL); if (!p_iov->pf2vf_reply) goto free_vf2pf_request; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n", p_iov->vf2pf_request, (u64)p_iov->vf2pf_request_phys, p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys); /* Allocate Bulletin board */ p_iov->bulletin.size = sizeof(struct qed_bulletin_content); p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, p_iov->bulletin.size, &p_iov->bulletin.phys, GFP_KERNEL); if (!p_iov->bulletin.p_virt) goto free_pf2vf_reply; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n", p_iov->bulletin.p_virt, (u64)p_iov->bulletin.phys, p_iov->bulletin.size); mutex_init(&p_iov->mutex); p_hwfn->vf_iov_info = p_iov; p_hwfn->hw_info.personality = QED_PCI_ETH; rc = qed_vf_pf_acquire(p_hwfn); /* If VF is 100g using a mapped bar and PF is too old to support that, * acquisition would succeed - but the VF would have no way knowing * the size of the doorbell bar configured in HW and thus will not * know how to split it for 2nd hw-function. * In this case we re-try without the indication of the mapped * doorbell. 
*/ if (!rc && p_iov->b_doorbell_bar && !qed_vf_hw_bar_size(p_hwfn, BAR_ID_1) && (p_hwfn->cdev->num_hwfns > 1)) { rc = _qed_vf_pf_release(p_hwfn, false); if (rc) return rc; p_iov->b_doorbell_bar = false; p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview + PXP_VF_BAR0_START_DQ; rc = qed_vf_pf_acquire(p_hwfn); } DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n", p_hwfn->regview, p_hwfn->doorbells, p_hwfn->cdev->doorbells); return rc; free_pf2vf_reply: dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(union pfvf_tlvs), p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys); free_vf2pf_request: dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(union vfpf_tlvs), p_iov->vf2pf_request, p_iov->vf2pf_request_phys); free_p_iov: kfree(p_iov); return -ENOMEM; } #define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A #define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \ (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev))) static void __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, struct qed_tunn_update_type *p_src, enum qed_tunn_mode mask, u8 *p_cls) { if (p_src->b_update_mode) { p_req->tun_mode_update_mask |= BIT(mask); if (p_src->b_mode_enabled) p_req->tunn_mode |= BIT(mask); } *p_cls = p_src->tun_cls; } static void qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, struct qed_tunn_update_type *p_src, enum qed_tunn_mode mask, u8 *p_cls, struct qed_tunn_update_udp_port *p_port, u8 *p_update_port, u16 *p_udp_port) { if (p_port->b_update_port) { *p_update_port = 1; *p_udp_port = p_port->port; } __qed_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls); } void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun) { if (p_tun->vxlan.b_mode_enabled) p_tun->vxlan.b_update_mode = true; if (p_tun->l2_geneve.b_mode_enabled) p_tun->l2_geneve.b_update_mode = true; if (p_tun->ip_geneve.b_mode_enabled) p_tun->ip_geneve.b_update_mode = true; if (p_tun->l2_gre.b_mode_enabled) p_tun->l2_gre.b_update_mode = true; if (p_tun->ip_gre.b_mode_enabled) p_tun->ip_gre.b_update_mode = true; p_tun->b_update_rx_cls = true; p_tun->b_update_tx_cls = true; } static void __qed_vf_update_tunn_param(struct qed_tunn_update_type *p_tun, u16 feature_mask, u8 tunn_mode, u8 tunn_cls, enum qed_tunn_mode val) { if (feature_mask & BIT(val)) { p_tun->b_mode_enabled = tunn_mode; p_tun->tun_cls = tunn_cls; } else { p_tun->b_mode_enabled = false; } } static void qed_vf_update_tunn_param(struct qed_hwfn *p_hwfn, struct qed_tunnel_info *p_tun, struct pfvf_update_tunn_param_tlv *p_resp) { /* Update mode and classes provided by PF */ u16 feat_mask = p_resp->tunn_feature_mask; __qed_vf_update_tunn_param(&p_tun->vxlan, feat_mask, p_resp->vxlan_mode, p_resp->vxlan_clss, QED_MODE_VXLAN_TUNN); __qed_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask, p_resp->l2geneve_mode, p_resp->l2geneve_clss, QED_MODE_L2GENEVE_TUNN); __qed_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask, p_resp->ipgeneve_mode, p_resp->ipgeneve_clss, QED_MODE_IPGENEVE_TUNN); __qed_vf_update_tunn_param(&p_tun->l2_gre, feat_mask, p_resp->l2gre_mode, p_resp->l2gre_clss, QED_MODE_L2GRE_TUNN); __qed_vf_update_tunn_param(&p_tun->ip_gre, feat_mask, p_resp->ipgre_mode, p_resp->ipgre_clss, QED_MODE_IPGRE_TUNN); p_tun->geneve_port.port = p_resp->geneve_udp_port; p_tun->vxlan_port.port = p_resp->vxlan_udp_port; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x", p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled, p_tun->ip_geneve.b_mode_enabled, 
p_tun->l2_gre.b_mode_enabled, p_tun->ip_gre.b_mode_enabled); } int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, struct qed_tunnel_info *p_src) { struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_update_tunn_param_tlv *p_resp; struct vfpf_update_tunn_param_tlv *p_req; int rc; p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_req)); if (p_src->b_update_rx_cls && p_src->b_update_tx_cls) p_req->update_tun_cls = 1; qed_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, QED_MODE_VXLAN_TUNN, &p_req->vxlan_clss, &p_src->vxlan_port, &p_req->update_vxlan_port, &p_req->vxlan_port); qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve, QED_MODE_L2GENEVE_TUNN, &p_req->l2geneve_clss, &p_src->geneve_port, &p_req->update_geneve_port, &p_req->geneve_port); __qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve, QED_MODE_IPGENEVE_TUNN, &p_req->ipgeneve_clss); __qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre, QED_MODE_L2GRE_TUNN, &p_req->l2gre_clss); __qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre, QED_MODE_IPGRE_TUNN, &p_req->ipgre_clss); /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); p_resp = &p_iov->pf2vf_reply->tunn_param_resp; rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status); if (rc) goto exit; if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Failed to update tunnel parameters\n"); rc = -EINVAL; } qed_vf_update_tunn_param(p_hwfn, p_tun, p_resp); exit: qed_vf_pf_req_end(p_hwfn, rc); return rc; } int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size, void __iomem **pp_prod) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_start_queue_resp_tlv *resp; struct vfpf_start_rxq_tlv *req; u8 rx_qid = p_cid->rel.queue_id; int rc; /* clear mailbox and prep first tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req)); req->rx_qid = rx_qid; req->cqe_pbl_addr = cqe_pbl_addr; req->cqe_pbl_size = cqe_pbl_size; req->rxq_addr = bd_chain_phys_addr; req->hw_sb = p_cid->sb_igu_id; req->sb_index = p_cid->sb_idx; req->bd_max_bytes = bd_max_bytes; req->stat_id = -1; /* If PF is legacy, we'll need to calculate producers ourselves * as well as clean them. 
*/ if (p_iov->b_pre_fp_hsi) { u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid]; u32 init_prod_val = 0; *pp_prod = (u8 __iomem *) p_hwfn->regview + MSTORM_QZONE_START(p_hwfn->cdev) + hw_qid * MSTORM_QZONE_SIZE; /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), (u32 *)(&init_prod_val)); } qed_vf_pf_add_qid(p_hwfn, p_cid); /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); resp = &p_iov->pf2vf_reply->queue_start; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; if (resp->hdr.status != PFVF_STATUS_SUCCESS) { rc = -EINVAL; goto exit; } /* Learn the address of the producer from the response */ if (!p_iov->b_pre_fp_hsi) { u32 init_prod_val = 0; *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n", rx_qid, *pp_prod, resp->offset); /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), (u32 *)&init_prod_val); } exit: qed_vf_pf_req_end(p_hwfn, rc); return rc; } int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid, bool cqe_completion) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct vfpf_stop_rxqs_tlv *req; struct pfvf_def_resp_tlv *resp; int rc; /* clear mailbox and prep first tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req)); req->rx_qid = p_cid->rel.queue_id; req->num_rxqs = 1; req->cqe_completion = cqe_completion; qed_vf_pf_add_qid(p_hwfn, p_cid); /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); resp = &p_iov->pf2vf_reply->default_resp; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; if (resp->hdr.status != PFVF_STATUS_SUCCESS) { rc = -EINVAL; goto exit; } exit: qed_vf_pf_req_end(p_hwfn, rc); return rc; } int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid, dma_addr_t pbl_addr, u16 pbl_size, void __iomem **pp_doorbell) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_start_queue_resp_tlv *resp; struct vfpf_start_txq_tlv *req; u16 qid = p_cid->rel.queue_id; int rc; /* clear mailbox and prep first tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req)); req->tx_qid = qid; /* Tx */ req->pbl_addr = pbl_addr; req->pbl_size = pbl_size; req->hw_sb = p_cid->sb_igu_id; req->sb_index = p_cid->sb_idx; qed_vf_pf_add_qid(p_hwfn, p_cid); /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); resp = &p_iov->pf2vf_reply->queue_start; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; if (resp->hdr.status != PFVF_STATUS_SUCCESS) { rc = -EINVAL; goto exit; } /* Modern PFs provide the actual offsets, while legacy * provided only the queue id. 
*/ if (!p_iov->b_pre_fp_hsi) { *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset; } else { u8 cid = p_iov->acquire_resp.resc.cid[qid]; *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + qed_db_addr_vf(cid, DQ_DEMS_LEGACY); } DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Txq[0x%02x.%02x]: doorbell at %p [offset 0x%08x]\n", qid, p_cid->qid_usage_idx, *pp_doorbell, resp->offset); exit: qed_vf_pf_req_end(p_hwfn, rc); return rc; } int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct vfpf_stop_txqs_tlv *req; struct pfvf_def_resp_tlv *resp; int rc; /* clear mailbox and prep first tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req)); req->tx_qid = p_cid->rel.queue_id; req->num_txqs = 1; qed_vf_pf_add_qid(p_hwfn, p_cid); /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); resp = &p_iov->pf2vf_reply->default_resp; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; if (resp->hdr.status != PFVF_STATUS_SUCCESS) { rc = -EINVAL; goto exit; } exit: qed_vf_pf_req_end(p_hwfn, rc); return rc; } int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, u8 vport_id, u16 mtu, u8 inner_vlan_removal, enum qed_tpa_mode tpa_mode, u8 max_buffers_per_cqe, u8 only_untagged) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct vfpf_vport_start_tlv *req; struct pfvf_def_resp_tlv *resp; int rc, i; /* clear mailbox and prep first tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req)); req->mtu = mtu; req->vport_id = vport_id; req->inner_vlan_removal = inner_vlan_removal; req->tpa_mode = tpa_mode; req->max_buffers_per_cqe = max_buffers_per_cqe; req->only_untagged = only_untagged; /* status blocks */ for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) { struct qed_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i]; if (p_sb) req->sb_addr[i] = p_sb->sb_phys; } /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); resp = &p_iov->pf2vf_reply->default_resp; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; if (resp->hdr.status != PFVF_STATUS_SUCCESS) { rc = -EINVAL; goto exit; } exit: qed_vf_pf_req_end(p_hwfn, rc); return rc; } int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; int rc; /* clear mailbox and prep first tlv */ qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN, sizeof(struct vfpf_first_tlv)); /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; if (resp->hdr.status != PFVF_STATUS_SUCCESS) { rc = -EINVAL; goto exit; } exit: qed_vf_pf_req_end(p_hwfn, rc); return rc; } static bool qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn, struct qed_sp_vport_update_params *p_data, u16 tlv) { switch (tlv) { case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE: return !!(p_data->update_vport_active_rx_flg || p_data->update_vport_active_tx_flg); case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH: return !!p_data->update_tx_switching_flg; case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP: return !!p_data->update_inner_vlan_removal_flg; case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN: return !!p_data->update_accept_any_vlan_flg; case CHANNEL_TLV_VPORT_UPDATE_MCAST: return 
!!p_data->update_approx_mcast_flg; case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM: return !!(p_data->accept_flags.update_rx_mode_config || p_data->accept_flags.update_tx_mode_config); case CHANNEL_TLV_VPORT_UPDATE_RSS: return !!p_data->rss_params; case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA: return !!p_data->sge_tpa_params; default: DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n", tlv); return false; } } static void qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn, struct qed_sp_vport_update_params *p_data) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_def_resp_tlv *p_resp; u16 tlv; for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) { if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv)) continue; p_resp = (struct pfvf_def_resp_tlv *) qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv); if (p_resp && p_resp->hdr.status) DP_VERBOSE(p_hwfn, QED_MSG_IOV, "TLV[%d] Configuration %s\n", tlv, (p_resp && p_resp->hdr.status) ? "succeeded" : "failed"); } } int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, struct qed_sp_vport_update_params *p_params) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct vfpf_vport_update_tlv *req; struct pfvf_def_resp_tlv *resp; u8 update_rx, update_tx; u16 size, tlv; int rc; resp = &p_iov->pf2vf_reply->default_resp; update_rx = p_params->update_vport_active_rx_flg; update_tx = p_params->update_vport_active_tx_flg; /* clear mailbox and prep header tlv */ qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req)); /* Prepare extended tlvs */ if (update_rx || update_tx) { struct vfpf_vport_update_activate_tlv *p_act_tlv; size = sizeof(struct vfpf_vport_update_activate_tlv); p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_VPORT_UPDATE_ACTIVATE, size); if (update_rx) { p_act_tlv->update_rx = update_rx; p_act_tlv->active_rx = p_params->vport_active_rx_flg; } if (update_tx) { p_act_tlv->update_tx = update_tx; p_act_tlv->active_tx = p_params->vport_active_tx_flg; } } if (p_params->update_tx_switching_flg) { struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; size = sizeof(struct vfpf_vport_update_tx_switch_tlv); tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size); p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg; } if (p_params->update_approx_mcast_flg) { struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; size = sizeof(struct vfpf_vport_update_mcast_bin_tlv); p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_VPORT_UPDATE_MCAST, size); memcpy(p_mcast_tlv->bins, p_params->bins, sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); } update_rx = p_params->accept_flags.update_rx_mode_config; update_tx = p_params->accept_flags.update_tx_mode_config; if (update_rx || update_tx) { struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; size = sizeof(struct vfpf_vport_update_accept_param_tlv); p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size); if (update_rx) { p_accept_tlv->update_rx_mode = update_rx; p_accept_tlv->rx_accept_filter = p_params->accept_flags.rx_accept_filter; } if (update_tx) { p_accept_tlv->update_tx_mode = update_tx; p_accept_tlv->tx_accept_filter = p_params->accept_flags.tx_accept_filter; } } if (p_params->rss_params) { struct qed_rss_params *rss_params = p_params->rss_params; struct vfpf_vport_update_rss_tlv *p_rss_tlv; int i, table_size; size = sizeof(struct vfpf_vport_update_rss_tlv); p_rss_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, 
CHANNEL_TLV_VPORT_UPDATE_RSS, size); if (rss_params->update_rss_config) p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_CONFIG_FLAG; if (rss_params->update_rss_capabilities) p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_CAPS_FLAG; if (rss_params->update_rss_ind_table) p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_IND_TABLE_FLAG; if (rss_params->update_rss_key) p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG; p_rss_tlv->rss_enable = rss_params->rss_enable; p_rss_tlv->rss_caps = rss_params->rss_caps; p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log; table_size = min_t(int, T_ETH_INDIRECTION_TABLE_SIZE, 1 << p_rss_tlv->rss_table_size_log); for (i = 0; i < table_size; i++) { struct qed_queue_cid *p_queue; p_queue = rss_params->rss_ind_table[i]; p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id; } memcpy(p_rss_tlv->rss_key, rss_params->rss_key, sizeof(rss_params->rss_key)); } if (p_params->update_accept_any_vlan_flg) { struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv; size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv); tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size); p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan; p_any_vlan_tlv->update_accept_any_vlan_flg = p_params->update_accept_any_vlan_flg; } /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; if (resp->hdr.status != PFVF_STATUS_SUCCESS) { rc = -EINVAL; goto exit; } qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params); exit: qed_vf_pf_req_end(p_hwfn, rc); return rc; } int qed_vf_pf_reset(struct qed_hwfn *p_hwfn) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_def_resp_tlv *resp; struct vfpf_first_tlv *req; int rc; /* clear mailbox and prep first tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req)); /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); resp = &p_iov->pf2vf_reply->default_resp; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; if (resp->hdr.status != PFVF_STATUS_SUCCESS) { rc = -EAGAIN; goto exit; } p_hwfn->b_int_enabled = 0; exit: qed_vf_pf_req_end(p_hwfn, rc); return rc; } void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, struct qed_filter_mcast *p_filter_cmd) { struct qed_sp_vport_update_params sp_params; int i; memset(&sp_params, 0, sizeof(sp_params)); sp_params.update_approx_mcast_flg = 1; if (p_filter_cmd->opcode == QED_FILTER_ADD) { for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { u32 bit; bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); sp_params.bins[bit / 32] |= 1 << (bit % 32); } } qed_vf_pf_vport_update(p_hwfn, &sp_params); } int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn, struct qed_filter_ucast *p_ucast) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct vfpf_ucast_filter_tlv *req; struct pfvf_def_resp_tlv *resp; int rc; /* clear mailbox and prep first tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req)); req->opcode = (u8)p_ucast->opcode; req->type = (u8)p_ucast->type; memcpy(req->mac, p_ucast->mac, ETH_ALEN); req->vlan = p_ucast->vlan; /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); resp = &p_iov->pf2vf_reply->default_resp; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; if (resp->hdr.status 
!= PFVF_STATUS_SUCCESS) { rc = -EAGAIN; goto exit; } exit: qed_vf_pf_req_end(p_hwfn, rc); return rc; } int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; int rc; /* clear mailbox and prep first tlv */ qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP, sizeof(struct vfpf_first_tlv)); /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; if (resp->hdr.status != PFVF_STATUS_SUCCESS) { rc = -EINVAL; goto exit; } exit: qed_vf_pf_req_end(p_hwfn, rc); return rc; } int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, struct qed_queue_cid *p_cid) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_read_coal_resp_tlv *resp; struct vfpf_read_coal_req_tlv *req; int rc; /* clear mailbox and prep header tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ, sizeof(*req)); req->qid = p_cid->rel.queue_id; req->is_rx = p_cid->b_is_rx ? 1 : 0; qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); resp = &p_iov->pf2vf_reply->read_coal_resp; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; if (resp->hdr.status != PFVF_STATUS_SUCCESS) goto exit; *p_coal = resp->coal; exit: qed_vf_pf_req_end(p_hwfn, rc); return rc; } int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, const u8 *p_mac) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct vfpf_bulletin_update_mac_tlv *p_req; struct pfvf_def_resp_tlv *p_resp; int rc; if (!p_mac) return -EINVAL; /* clear mailbox and prep header tlv */ p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_BULLETIN_UPDATE_MAC, sizeof(*p_req)); ether_addr_copy(p_req->mac, p_mac); DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Requesting bulletin update for MAC[%pM]\n", p_mac); /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); p_resp = &p_iov->pf2vf_reply->default_resp; rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status); qed_vf_pf_req_end(p_hwfn, rc); return rc; } int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct vfpf_update_coalesce *req; struct pfvf_def_resp_tlv *resp; int rc; /* clear mailbox and prep header tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE, sizeof(*req)); req->rx_coal = rx_coal; req->tx_coal = tx_coal; req->qid = p_cid->rel.queue_id; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n", rx_coal, tx_coal, req->qid); /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); resp = &p_iov->pf2vf_reply->default_resp; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); if (rc) goto exit; if (resp->hdr.status != PFVF_STATUS_SUCCESS) goto exit; if (rx_coal) p_hwfn->cdev->rx_coalesce_usecs = rx_coal; if (tx_coal) p_hwfn->cdev->tx_coalesce_usecs = tx_coal; exit: qed_vf_pf_req_end(p_hwfn, rc); return rc; } u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; if (!p_iov) { DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n"); return 0; } return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id; } void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, u16 sb_id, struct qed_sb_info *p_sb) { 
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; if (!p_iov) { DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n"); return; } if (sb_id >= PFVF_MAX_SBS_PER_VF) { DP_NOTICE(p_hwfn, "Can't configure SB %04x\n", sb_id); return; } p_iov->sbs_info[sb_id] = p_sb; } int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct qed_bulletin_content shadow; u32 crc, crc_size; crc_size = sizeof(p_iov->bulletin.p_virt->crc); *p_change = 0; /* Need to guarantee PF is not in the middle of writing it */ memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size); /* If version did not update, no need to do anything */ if (shadow.version == p_iov->bulletin_shadow.version) return 0; /* Verify the bulletin we see is valid */ crc = crc32(0, (u8 *)&shadow + crc_size, p_iov->bulletin.size - crc_size); if (crc != shadow.crc) return -EAGAIN; /* Set the shadow bulletin and process it */ memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size); DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Read a bulletin update %08x\n", shadow.version); *p_change = 1; return 0; } void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn, struct qed_mcp_link_params *p_params, struct qed_bulletin_content *p_bulletin) { memset(p_params, 0, sizeof(*p_params)); p_params->speed.autoneg = p_bulletin->req_autoneg; p_params->speed.advertised_speeds = p_bulletin->req_adv_speed; p_params->speed.forced_speed = p_bulletin->req_forced_speed; p_params->pause.autoneg = p_bulletin->req_autoneg_pause; p_params->pause.forced_rx = p_bulletin->req_forced_rx; p_params->pause.forced_tx = p_bulletin->req_forced_tx; p_params->loopback_mode = p_bulletin->req_loopback; } void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, struct qed_mcp_link_params *params) { __qed_vf_get_link_params(p_hwfn, params, &(p_hwfn->vf_iov_info->bulletin_shadow)); } void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn, struct qed_mcp_link_state *p_link, struct qed_bulletin_content *p_bulletin) { memset(p_link, 0, sizeof(*p_link)); p_link->link_up = p_bulletin->link_up; p_link->speed = p_bulletin->speed; p_link->full_duplex = p_bulletin->full_duplex; p_link->an = p_bulletin->autoneg; p_link->an_complete = p_bulletin->autoneg_complete; p_link->parallel_detection = p_bulletin->parallel_detection; p_link->pfc_enabled = p_bulletin->pfc_enabled; p_link->partner_adv_speed = p_bulletin->partner_adv_speed; p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en; p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en; p_link->partner_adv_pause = p_bulletin->partner_adv_pause; p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault; } void qed_vf_get_link_state(struct qed_hwfn *p_hwfn, struct qed_mcp_link_state *link) { __qed_vf_get_link_state(p_hwfn, link, &(p_hwfn->vf_iov_info->bulletin_shadow)); } void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, struct qed_mcp_link_capabilities *p_link_caps, struct qed_bulletin_content *p_bulletin) { memset(p_link_caps, 0, sizeof(*p_link_caps)); p_link_caps->speed_capabilities = p_bulletin->capability_speed; } void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, struct qed_mcp_link_capabilities *p_link_caps) { __qed_vf_get_link_caps(p_hwfn, p_link_caps, &(p_hwfn->vf_iov_info->bulletin_shadow)); } void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs) { *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs; } void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs) { *num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs; } void 
qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids) { *num_cids = p_hwfn->vf_iov_info->acquire_resp.resc.num_cids; } void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac) { memcpy(port_mac, p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN); } void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters) { struct qed_vf_iov *p_vf; p_vf = p_hwfn->vf_iov_info; *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters; } void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters) { struct qed_vf_iov *p_vf = p_hwfn->vf_iov_info; *num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters; } bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac) { struct qed_bulletin_content *bulletin; bulletin = &p_hwfn->vf_iov_info->bulletin_shadow; if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED))) return true; /* Forbid VF from changing a MAC enforced by PF */ if (ether_addr_equal(bulletin->mac, mac)) return false; return false; } static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn, u8 *dst_mac, u8 *p_is_forced) { struct qed_bulletin_content *bulletin; bulletin = &hwfn->vf_iov_info->bulletin_shadow; if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) { if (p_is_forced) *p_is_forced = 1; } else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) { if (p_is_forced) *p_is_forced = 0; } else { return false; } ether_addr_copy(dst_mac, bulletin->mac); return true; } static void qed_vf_bulletin_get_udp_ports(struct qed_hwfn *p_hwfn, u16 *p_vxlan_port, u16 *p_geneve_port) { struct qed_bulletin_content *p_bulletin; p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow; *p_vxlan_port = p_bulletin->vxlan_udp_port; *p_geneve_port = p_bulletin->geneve_udp_port; } void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, u16 *fw_major, u16 *fw_minor, u16 *fw_rev, u16 *fw_eng) { struct pf_vf_pfdev_info *info; info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info; *fw_major = info->fw_major; *fw_minor = info->fw_minor; *fw_rev = info->fw_rev; *fw_eng = info->fw_eng; } static void qed_handle_bulletin_change(struct qed_hwfn *hwfn) { struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth; u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced; void *cookie = hwfn->cdev->ops_cookie; u16 vxlan_port, geneve_port; qed_vf_bulletin_get_udp_ports(hwfn, &vxlan_port, &geneve_port); is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac, &is_mac_forced); if (is_mac_exist && cookie) ops->force_mac(cookie, mac, !!is_mac_forced); ops->ports_update(cookie, vxlan_port, geneve_port); /* Always update link configuration according to bulletin */ qed_link_update(hwfn, NULL); } void qed_iov_vf_task(struct work_struct *work) { struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, iov_task.work); u8 change = 0; if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags)) return; /* Handle bulletin board changes */ qed_vf_read_bulletin(hwfn, &change); if (test_and_clear_bit(QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG, &hwfn->iov_task_flags)) change = 1; if (change) qed_handle_bulletin_change(hwfn); /* As VF is polling bulletin board, need to constantly re-schedule */ queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ); }
linux-master
drivers/net/ethernet/qlogic/qed/qed_vf.c
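Every qed_vf_pf_*() helper in the file above follows the same mailbox round trip: prepare and clear the request/reply buffers, append TLVs terminated by CHANNEL_TLV_LIST_END, send the message to the PF, check the reply status, and end the request. The standalone sketch below mirrors that sequence with hypothetical names (struct mbox, vf_send_request, mbox_send_and_wait); it is an illustration of the pattern under those assumptions, not the driver's API.

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MBOX_STATUS_SUCCESS 1              /* stand-in for PFVF_STATUS_SUCCESS */

struct mbox {
	unsigned char request[256];        /* stand-in for the vf2pf request buffer */
	unsigned short reply_status;       /* stand-in for resp->hdr.status        */
};

/* Stub transport: pretend the PF processed the request and replied OK. */
static int mbox_send_and_wait(struct mbox *m)
{
	m->reply_status = MBOX_STATUS_SUCCESS;
	return 0;                          /* the driver instead returns -EBUSY on timeout */
}

static int vf_send_request(struct mbox *m, const void *tlvs, size_t len)
{
	int rc;

	/* 1. "prep": clear both mailboxes before building the new request.      */
	memset(m->request, 0, sizeof(m->request));
	m->reply_status = 0;

	/* 2. Build the TLV chain; the driver ends it with CHANNEL_TLV_LIST_END.  */
	memcpy(m->request, tlvs, len < sizeof(m->request) ? len : sizeof(m->request));

	/* 3. Ring the doorbell and poll for the PF's answer.                     */
	rc = mbox_send_and_wait(m);

	/* 4. A transport success still needs the PF's status checked.            */
	if (!rc && m->reply_status != MBOX_STATUS_SUCCESS)
		rc = -EAGAIN;

	/* 5. "req_end": in the driver this also drops the channel mutex.         */
	return rc;
}

int main(void)
{
	struct mbox m;
	unsigned char dummy_tlvs[8] = { 0 };

	printf("rc=%d\n", vf_send_request(&m, dummy_tlvs, sizeof(dummy_tlvs)));
	return 0;
}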
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2021 Marvell International Ltd. */ #include <linux/types.h> #include <linux/crc8.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #include "qed_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_iro_hsi.h" #include "qed_reg_addr.h" #define CDU_VALIDATION_DEFAULT_CFG CDU_CONTEXT_VALIDATION_DEFAULT_CFG static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = { {400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */ {528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */ {608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */ }; static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = { {240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */ }; /* General constants */ #define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \ QM_PQ_ELEMENT_SIZE, \ 0x1000) : 0) #define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \ 0x100) - 1 : 0) #define QM_INVALID_PQ_ID 0xffff /* Max link speed (in Mbps) */ #define QM_MAX_LINK_SPEED 100000 /* Feature enable */ #define QM_BYPASS_EN 1 #define QM_BYTE_CRD_EN 1 /* Initial VOQ byte credit */ #define QM_INITIAL_VOQ_BYTE_CRD 98304 /* Other PQ constants */ #define QM_OTHER_PQS_PER_PF 4 /* VOQ constants */ #define MAX_NUM_VOQS (MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2) #define VOQS_BIT_MASK (BIT(MAX_NUM_VOQS) - 1) /* WFQ constants */ /* PF WFQ increment value, 0x9000 = 4*9*1024 */ #define QM_PF_WFQ_INC_VAL(weight) ((weight) * 0x9000) /* PF WFQ Upper bound, in MB, 10 * burst size of 1ms in 50Gbps */ #define QM_PF_WFQ_UPPER_BOUND 62500000 /* PF WFQ max increment value, 0.7 * upper bound */ #define QM_PF_WFQ_MAX_INC_VAL ((QM_PF_WFQ_UPPER_BOUND * 7) / 10) /* Number of VOQs in E5 PF WFQ credit register (QmWfqCrd) */ #define QM_PF_WFQ_CRD_E5_NUM_VOQS 16 /* VP WFQ increment value */ #define QM_VP_WFQ_INC_VAL(weight) ((weight) * QM_VP_WFQ_MIN_INC_VAL) /* VP WFQ min increment value */ #define QM_VP_WFQ_MIN_INC_VAL 10800 /* VP WFQ max increment value, 2^30 */ #define QM_VP_WFQ_MAX_INC_VAL 0x40000000 /* VP WFQ bypass threshold */ #define QM_VP_WFQ_BYPASS_THRESH (QM_VP_WFQ_MIN_INC_VAL - 100) /* VP RL credit task cost */ #define QM_VP_RL_CRD_TASK_COST 9700 /* Bit of VOQ in VP WFQ PQ map */ #define QM_VP_WFQ_PQ_VOQ_SHIFT 0 /* Bit of PF in VP WFQ PQ map */ #define QM_VP_WFQ_PQ_PF_SHIFT 5 /* RL constants */ /* Period in us */ #define QM_RL_PERIOD 5 /* Period in 25MHz cycles */ #define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD) /* RL increment value - rate is specified in mbps */ #define QM_RL_INC_VAL(rate) ({ \ typeof(rate) __rate = (rate); \ max_t(u32, \ (u32)(((__rate ? 
__rate : \ 100000) * \ QM_RL_PERIOD * \ 101) / (8 * 100)), 1); }) /* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */ #define QM_PF_RL_UPPER_BOUND 62500000 /* Max PF RL increment value is 0.7 * upper bound */ #define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10) /* QCN RL Upper bound, speed is in Mpbs */ #define QM_GLOBAL_RL_UPPER_BOUND(speed) ((u32)max_t( \ u32, \ (u32)(((speed) * \ QM_RL_PERIOD * 101) / (8 * 100)), \ QM_VP_RL_CRD_TASK_COST \ + 1000)) /* AFullOprtnstcCrdMask constants */ #define QM_OPPOR_LINE_VOQ_DEF 1 #define QM_OPPOR_FW_STOP_DEF 0 #define QM_OPPOR_PQ_EMPTY_DEF 1 /* Command Queue constants */ /* Pure LB CmdQ lines (+spare) */ #define PBF_CMDQ_PURE_LB_LINES 150 #define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \ (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \ (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \ PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET)) #define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \ (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \ (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \ PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET)) /* Returns the VOQ line credit for the specified number of PBF command lines. * PBF lines are specified in 256b units. */ #define QM_VOQ_LINE_CRD(pbf_cmd_lines) \ ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT) /* BTB: blocks constants (block size = 256B) */ /* 256B blocks in 9700B packet */ #define BTB_JUMBO_PKT_BLOCKS 38 /* Headroom per-port */ #define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS #define BTB_PURE_LB_FACTOR 10 /* Factored (hence really 0.7) */ #define BTB_PURE_LB_RATIO 7 /* QM stop command constants */ #define QM_STOP_PQ_MASK_WIDTH 32 #define QM_STOP_CMD_ADDR 2 #define QM_STOP_CMD_STRUCT_SIZE 2 #define QM_STOP_CMD_PAUSE_MASK_OFFSET 0 #define QM_STOP_CMD_PAUSE_MASK_SHIFT 0 #define QM_STOP_CMD_PAUSE_MASK_MASK -1 #define QM_STOP_CMD_GROUP_ID_OFFSET 1 #define QM_STOP_CMD_GROUP_ID_SHIFT 16 #define QM_STOP_CMD_GROUP_ID_MASK 15 #define QM_STOP_CMD_PQ_TYPE_OFFSET 1 #define QM_STOP_CMD_PQ_TYPE_SHIFT 24 #define QM_STOP_CMD_PQ_TYPE_MASK 1 #define QM_STOP_CMD_MAX_POLL_COUNT 100 #define QM_STOP_CMD_POLL_PERIOD_US 500 /* QM command macros */ #define QM_CMD_STRUCT_SIZE(cmd) cmd ## _STRUCT_SIZE #define QM_CMD_SET_FIELD(var, cmd, field, value) \ SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \ cmd ## _ ## field, \ value) #define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, rl_valid, \ rl_id, ext_voq, wrr) \ do { \ u32 __reg = 0; \ \ BUILD_BUG_ON(sizeof((map).reg) != sizeof(__reg)); \ memset(&(map), 0, sizeof(map)); \ SET_FIELD(__reg, QM_RF_PQ_MAP_PQ_VALID, 1); \ SET_FIELD(__reg, QM_RF_PQ_MAP_RL_VALID, \ !!(rl_valid)); \ SET_FIELD(__reg, QM_RF_PQ_MAP_VP_PQ_ID, (vp_pq_id)); \ SET_FIELD(__reg, QM_RF_PQ_MAP_RL_ID, (rl_id)); \ SET_FIELD(__reg, QM_RF_PQ_MAP_VOQ, (ext_voq)); \ SET_FIELD(__reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, \ (wrr)); \ \ STORE_RT_REG((p_hwfn), QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \ __reg); \ (map).reg = cpu_to_le32(__reg); \ } while (0) #define WRITE_PQ_INFO_TO_RAM 1 #define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \ (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \ ((rl_valid ? 
1 : 0) << 22) | (((rl) & 255) << 24) | \ (((rl) >> 8) << 9)) #define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \ (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \ XSTORM_PQ_INFO_OFFSET(pq_id)) static const char * const s_protocol_types[] = { "PROTOCOLID_ISCSI", "PROTOCOLID_FCOE", "PROTOCOLID_ROCE", "PROTOCOLID_CORE", "PROTOCOLID_ETH", "PROTOCOLID_IWARP", "PROTOCOLID_TOE", "PROTOCOLID_PREROCE", "PROTOCOLID_COMMON", "PROTOCOLID_TCP", "PROTOCOLID_RDMA", "PROTOCOLID_SCSI", }; static const char *s_ramrod_cmd_ids[][28] = { { "ISCSI_RAMROD_CMD_ID_UNUSED", "ISCSI_RAMROD_CMD_ID_INIT_FUNC", "ISCSI_RAMROD_CMD_ID_DESTROY_FUNC", "ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN", "ISCSI_RAMROD_CMD_ID_UPDATE_CONN", "ISCSI_RAMROD_CMD_ID_TERMINATION_CONN", "ISCSI_RAMROD_CMD_ID_CLEAR_SQ", "ISCSI_RAMROD_CMD_ID_MAC_UPDATE", "ISCSI_RAMROD_CMD_ID_CONN_STATS", }, { "FCOE_RAMROD_CMD_ID_INIT_FUNC", "FCOE_RAMROD_CMD_ID_DESTROY_FUNC", "FCOE_RAMROD_CMD_ID_STAT_FUNC", "FCOE_RAMROD_CMD_ID_OFFLOAD_CONN", "FCOE_RAMROD_CMD_ID_TERMINATE_CONN", }, { "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT", "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR", "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ", "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ", "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ", "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING", "RDMA_RAMROD_STOP_NS_TRACKING", "ROCE_RAMROD_CREATE_QP", "ROCE_RAMROD_MODIFY_QP", "ROCE_RAMROD_QUERY_QP", "ROCE_RAMROD_DESTROY_QP", "ROCE_RAMROD_CREATE_UD_QP", "ROCE_RAMROD_DESTROY_UD_QP", "ROCE_RAMROD_FUNC_UPDATE", "ROCE_RAMROD_SUSPEND_QP", "ROCE_RAMROD_QUERY_SUSPENDED_QP", "ROCE_RAMROD_CREATE_SUSPENDED_QP", "ROCE_RAMROD_RESUME_QP", "ROCE_RAMROD_SUSPEND_UD_QP", "ROCE_RAMROD_RESUME_UD_QP", "ROCE_RAMROD_CREATE_SUSPENDED_UD_QP", "ROCE_RAMROD_FLUSH_DPT_QP", }, { "CORE_RAMROD_UNUSED", "CORE_RAMROD_RX_QUEUE_START", "CORE_RAMROD_TX_QUEUE_START", "CORE_RAMROD_RX_QUEUE_STOP", "CORE_RAMROD_TX_QUEUE_STOP", "CORE_RAMROD_RX_QUEUE_FLUSH", "CORE_RAMROD_TX_QUEUE_UPDATE", "CORE_RAMROD_QUEUE_STATS_QUERY", }, { "ETH_RAMROD_UNUSED", "ETH_RAMROD_VPORT_START", "ETH_RAMROD_VPORT_UPDATE", "ETH_RAMROD_VPORT_STOP", "ETH_RAMROD_RX_QUEUE_START", "ETH_RAMROD_RX_QUEUE_STOP", "ETH_RAMROD_TX_QUEUE_START", "ETH_RAMROD_TX_QUEUE_STOP", "ETH_RAMROD_FILTERS_UPDATE", "ETH_RAMROD_RX_QUEUE_UPDATE", "ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION", "ETH_RAMROD_RX_ADD_OPENFLOW_FILTER", "ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER", "ETH_RAMROD_RX_ADD_UDP_FILTER", "ETH_RAMROD_RX_DELETE_UDP_FILTER", "ETH_RAMROD_RX_CREATE_GFT_ACTION", "ETH_RAMROD_RX_UPDATE_GFT_FILTER", "ETH_RAMROD_TX_QUEUE_UPDATE", "ETH_RAMROD_RGFS_FILTER_ADD", "ETH_RAMROD_RGFS_FILTER_DEL", "ETH_RAMROD_TGFS_FILTER_ADD", "ETH_RAMROD_TGFS_FILTER_DEL", "ETH_RAMROD_GFS_COUNTERS_REPORT_REQUEST", }, { "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT", "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR", "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ", "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ", "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ", "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING", "RDMA_RAMROD_STOP_NS_TRACKING", "IWARP_RAMROD_CMD_ID_TCP_OFFLOAD", "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD", "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR", "IWARP_RAMROD_CMD_ID_CREATE_QP", "IWARP_RAMROD_CMD_ID_QUERY_QP", "IWARP_RAMROD_CMD_ID_MODIFY_QP", "IWARP_RAMROD_CMD_ID_DESTROY_QP", "IWARP_RAMROD_CMD_ID_ABORT_TCP_OFFLOAD", }, { NULL }, /*TOE*/ { NULL }, /*PREROCE*/ { "COMMON_RAMROD_UNUSED", "COMMON_RAMROD_PF_START", "COMMON_RAMROD_PF_STOP", "COMMON_RAMROD_VF_START", "COMMON_RAMROD_VF_STOP", 
"COMMON_RAMROD_PF_UPDATE", "COMMON_RAMROD_RL_UPDATE", "COMMON_RAMROD_EMPTY", } }; /******************** INTERNAL IMPLEMENTATION *********************/ /* Returns the external VOQ number */ static u8 qed_get_ext_voq(struct qed_hwfn *p_hwfn, u8 port_id, u8 tc, u8 max_phys_tcs_per_port) { if (tc == PURE_LB_TC) return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id; else return port_id * max_phys_tcs_per_port + tc; } /* Prepare PF RL enable/disable runtime init values */ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en) { STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0); if (pf_rl_en) { u8 num_ext_voqs = MAX_NUM_VOQS; u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1; /* Enable RLs for all VOQs */ STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET, (u32)voq_bit_mask); /* Write RL period */ STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M); STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET, QM_RL_PERIOD_CLK_25M); /* Set credit threshold for QM bypass flow */ if (QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET, QM_PF_RL_UPPER_BOUND); } } /* Prepare PF WFQ enable/disable runtime init values */ static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en) { STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0); /* Set credit threshold for QM bypass flow */ if (pf_wfq_en && QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET, QM_PF_WFQ_UPPER_BOUND); } /* Prepare global RL enable/disable runtime init values */ static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en) { STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET, global_rl_en ? 1 : 0); if (global_rl_en) { /* Write RL period (use timer 0 only) */ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M); STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M); /* Set credit threshold for QM bypass flow */ if (QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET, QM_GLOBAL_RL_UPPER_BOUND(10000) - 1); } } /* Prepare VPORT WFQ enable/disable runtime init values */ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en) { STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET, vport_wfq_en ? 1 : 0); /* Set credit threshold for QM bypass flow */ if (vport_wfq_en && QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET, QM_VP_WFQ_BYPASS_THRESH); } /* Prepare runtime init values to allocate PBF command queue lines for * the specified VOQ. */ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn, u8 ext_voq, u16 cmdq_lines) { u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines); OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), (u32)cmdq_lines); STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq, qm_line_crd); STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq, qm_line_crd); } /* Prepare runtime init values to allocate PBF command queue lines. 
*/ static void qed_cmdq_lines_rt_init(struct qed_hwfn *p_hwfn, u8 max_ports_per_engine, u8 max_phys_tcs_per_port, struct init_qm_port_params port_params[MAX_NUM_PORTS]) { u8 tc, ext_voq, port_id, num_tcs_in_port; u8 num_ext_voqs = MAX_NUM_VOQS; /* Clear PBF lines of all VOQs */ for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++) STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0); for (port_id = 0; port_id < max_ports_per_engine; port_id++) { u16 phys_lines, phys_lines_per_tc; if (!port_params[port_id].active) continue; /* Find number of command queue lines to divide between the * active physical TCs. */ phys_lines = port_params[port_id].num_pbf_cmd_lines; phys_lines -= PBF_CMDQ_PURE_LB_LINES; /* Find #lines per active physical TC */ num_tcs_in_port = 0; for (tc = 0; tc < max_phys_tcs_per_port; tc++) if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1) num_tcs_in_port++; phys_lines_per_tc = phys_lines / num_tcs_in_port; /* Init registers per active TC */ for (tc = 0; tc < max_phys_tcs_per_port; tc++) { ext_voq = qed_get_ext_voq(p_hwfn, port_id, tc, max_phys_tcs_per_port); if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1) qed_cmdq_lines_voq_rt_init(p_hwfn, ext_voq, phys_lines_per_tc); } /* Init registers for pure LB TC */ ext_voq = qed_get_ext_voq(p_hwfn, port_id, PURE_LB_TC, max_phys_tcs_per_port); qed_cmdq_lines_voq_rt_init(p_hwfn, ext_voq, PBF_CMDQ_PURE_LB_LINES); } } /* Prepare runtime init values to allocate guaranteed BTB blocks for the * specified port. The guaranteed BTB space is divided between the TCs as * follows (shared space Is currently not used): * 1. Parameters: * B - BTB blocks for this port * C - Number of physical TCs for this port * 2. Calculation: * a. 38 blocks (9700B jumbo frame) are allocated for global per port * headroom. * b. B = B - 38 (remainder after global headroom allocation). * c. MAX(38,B/(C+0.7)) blocks are allocated for the pure LB VOQ. * d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation). * e. B/C blocks are allocated for each physical TC. * Assumptions: * - MTU is up to 9700 bytes (38 blocks) * - All TCs are considered symmetrical (same rate and packet size) * - No optimization for lossy TC (all are considered lossless). Shared space * is not enabled and allocated for each TC. */ static void qed_btb_blocks_rt_init(struct qed_hwfn *p_hwfn, u8 max_ports_per_engine, u8 max_phys_tcs_per_port, struct init_qm_port_params port_params[MAX_NUM_PORTS]) { u32 usable_blocks, pure_lb_blocks, phys_blocks; u8 tc, ext_voq, port_id, num_tcs_in_port; for (port_id = 0; port_id < max_ports_per_engine; port_id++) { if (!port_params[port_id].active) continue; /* Subtract headroom blocks */ usable_blocks = port_params[port_id].num_btb_blocks - BTB_HEADROOM_BLOCKS; /* Find blocks per physical TC. Use factor to avoid floating * arithmethic. 
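* e.g. usable_blocks = 1000 with 2 active TCs: pure_lb_blocks = max(38, ((1000 * 10) / (2 * 10 + 7)) / 10) = max(38, 37) = 38, and phys_blocks = (1000 - 38) / 2 = 481 per TC (illustrative numbers).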
*/ num_tcs_in_port = 0; for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1) num_tcs_in_port++; pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) / (num_tcs_in_port * BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO); pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS, pure_lb_blocks / BTB_PURE_LB_FACTOR); phys_blocks = (usable_blocks - pure_lb_blocks) / num_tcs_in_port; /* Init physical TCs */ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) { if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1) { ext_voq = qed_get_ext_voq(p_hwfn, port_id, tc, max_phys_tcs_per_port); STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET (ext_voq), phys_blocks); } } /* Init pure LB TC */ ext_voq = qed_get_ext_voq(p_hwfn, port_id, PURE_LB_TC, max_phys_tcs_per_port); STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq), pure_lb_blocks); } } /* Prepare runtime init values for the specified RL. * Set max link speed (100Gbps) per rate limiter. * Return -1 on error. */ static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn) { u32 upper_bound = QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) | (u32)QM_RL_CRD_REG_SIGN_BIT; u32 inc_val; u16 rl_id; /* Go over all global RLs */ for (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) { inc_val = QM_RL_INC_VAL(QM_MAX_LINK_SPEED); STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id, (u32)QM_RL_CRD_REG_SIGN_BIT); STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id, upper_bound); STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, inc_val); } return 0; } /* Returns the upper bound for the specified Vport RL parameters. * link_speed is in Mbps. * Returns 0 in case of error. */ static u32 qed_get_vport_rl_upper_bound(enum init_qm_rl_type vport_rl_type, u32 link_speed) { switch (vport_rl_type) { case QM_RL_TYPE_NORMAL: return QM_INITIAL_VOQ_BYTE_CRD; case QM_RL_TYPE_QCN: return QM_GLOBAL_RL_UPPER_BOUND(link_speed); default: return 0; } } /* Prepare VPORT RL runtime init values. * Return -1 on error. */ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn, u16 start_rl, u16 num_rls, u32 link_speed, struct init_qm_rl_params *rl_params) { u16 i, rl_id; if (num_rls && start_rl + num_rls >= MAX_QM_GLOBAL_RLS) { DP_NOTICE(p_hwfn, "Invalid rate limiter configuration\n"); return -1; } /* Go over all PF VPORTs */ for (i = 0, rl_id = start_rl; i < num_rls; i++, rl_id++) { u32 upper_bound, inc_val; upper_bound = qed_get_vport_rl_upper_bound((enum init_qm_rl_type) rl_params[i].vport_rl_type, link_speed); inc_val = QM_RL_INC_VAL(rl_params[i].vport_rl ? 
rl_params[i].vport_rl : link_speed); if (inc_val > upper_bound) { DP_NOTICE(p_hwfn, "Invalid RL rate - limit configuration\n"); return -1; } STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id, (u32)QM_RL_CRD_REG_SIGN_BIT); STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id, upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT); STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, inc_val); } return 0; } /* Prepare Tx PQ mapping runtime init values for the specified PF */ static int qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_qm_pf_rt_init_params *p_params, u32 base_mem_addr_4kb) { u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 }; struct init_qm_vport_params *vport_params = p_params->vport_params; u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE; u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group; struct init_qm_pq_params *pq_params = p_params->pq_params; u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb; num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs; first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE; last_pq_group = (p_params->start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE; pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids); vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids); mem_addr_4kb = base_mem_addr_4kb; /* Set mapping from PQ group to PF */ for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++) STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group, (u32)(p_params->pf_id)); /* Set PQ sizes */ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET, QM_PQ_SIZE_256B(p_params->num_pf_cids)); STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET, QM_PQ_SIZE_256B(p_params->num_vf_cids)); /* Go over all Tx PQs */ for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) { u16 *p_first_tx_pq_id, vport_id_in_pf; struct qm_rf_pq_map tx_pq_map; u8 tc_id = pq_params[i].tc_id; bool is_vf_pq; u8 ext_voq; ext_voq = qed_get_ext_voq(p_hwfn, pq_params[i].port_id, tc_id, p_params->max_phys_tcs_per_port); is_vf_pq = (i >= p_params->num_pf_pqs); /* Update first Tx PQ of VPORT/TC */ vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport; p_first_tx_pq_id = &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id]; if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) { u32 map_val = (ext_voq << QM_VP_WFQ_PQ_VOQ_SHIFT) | (p_params->pf_id << QM_VP_WFQ_PQ_PF_SHIFT); /* Create new VP PQ */ *p_first_tx_pq_id = pq_id; /* Map VP PQ to VOQ and PF */ STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET + *p_first_tx_pq_id, map_val); } /* Prepare PQ map entry */ QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, pq_id, *p_first_tx_pq_id, pq_params[i].rl_valid, pq_params[i].rl_id, ext_voq, pq_params[i].wrr_group); /* Set PQ base address */ STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, mem_addr_4kb); /* Clear PQ pointer table entry (64 bit) */ if (p_params->is_pf_loading) for (j = 0; j < 2; j++) STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET + (pq_id * 2) + j, 0); /* Write PQ info to RAM */ if (WRITE_PQ_INFO_TO_RAM != 0) { u32 pq_info = 0; pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id, p_params->pf_id, tc_id, pq_params[i].port_id, pq_params[i].rl_valid, pq_params[i].rl_id); qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id), pq_info); } /* If VF PQ, add indication to PQ VF mask */ if (is_vf_pq) { tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |= BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE)); mem_addr_4kb += vport_pq_mem_4kb; } else { mem_addr_4kb += pq_mem_4kb; } } /* Store Tx PQ VF mask to size 
select register */ for (i = 0; i < num_tx_pq_vf_masks; i++) if (tx_pq_vf_mask[i]) STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i, tx_pq_vf_mask[i]); return 0; } /* Prepare Other PQ mapping runtime init values for the specified PF */ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, bool is_pf_loading, u32 num_pf_cids, u32 num_tids, u32 base_mem_addr_4kb) { u32 pq_size, pq_mem_4kb, mem_addr_4kb; u16 i, j, pq_id, pq_group; /* A single other PQ group is used in each PF, where PQ group i is used * in PF i. */ pq_group = pf_id; pq_size = num_pf_cids + num_tids; pq_mem_4kb = QM_PQ_MEM_4KB(pq_size); mem_addr_4kb = base_mem_addr_4kb; /* Map PQ group to PF */ STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group, (u32)(pf_id)); /* Set PQ sizes */ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, QM_PQ_SIZE_256B(pq_size)); for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE; i < QM_OTHER_PQS_PER_PF; i++, pq_id++) { /* Set PQ base address */ STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id, mem_addr_4kb); /* Clear PQ pointer table entry */ if (is_pf_loading) for (j = 0; j < 2; j++) STORE_RT_REG(p_hwfn, QM_REG_PTRTBLOTHER_RT_OFFSET + (pq_id * 2) + j, 0); mem_addr_4kb += pq_mem_4kb; } } /* Prepare PF WFQ runtime init values for the specified PF. * Return -1 on error. */ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, struct qed_qm_pf_rt_init_params *p_params) { u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs; struct init_qm_pq_params *pq_params = p_params->pq_params; u32 inc_val, crd_reg_offset; u8 ext_voq; u16 i; inc_val = QM_PF_WFQ_INC_VAL(p_params->pf_wfq); if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n"); return -1; } for (i = 0; i < num_tx_pqs; i++) { ext_voq = qed_get_ext_voq(p_hwfn, pq_params[i].port_id, pq_params[i].tc_id, p_params->max_phys_tcs_per_port); crd_reg_offset = (p_params->pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET : QM_REG_WFQPFCRD_MSB_RT_OFFSET) + ext_voq * MAX_NUM_PFS_BB + (p_params->pf_id % MAX_NUM_PFS_BB); OVERWRITE_RT_REG(p_hwfn, crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT); } STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id, QM_PF_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT); STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id, inc_val); return 0; } /* Prepare PF RL runtime init values for the specified PF. * Return -1 on error. */ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl) { u32 inc_val = QM_RL_INC_VAL(pf_rl); if (inc_val > QM_PF_RL_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n"); return -1; } STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id, (u32)QM_RL_CRD_REG_SIGN_BIT); STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id, QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT); STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val); return 0; } /* Prepare VPORT WFQ runtime init values for the specified VPORTs. * Return -1 on error. 
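* The weight is taken per VPORT when set, otherwise per VPORT+TC; the programmed increment is weight * QM_VP_WFQ_MIN_INC_VAL (10800), and increments above QM_VP_WFQ_MAX_INC_VAL (2^30) are rejected.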
*/ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, u16 num_vports, struct init_qm_vport_params *vport_params) { u16 vport_pq_id, wfq, i; u32 inc_val; u8 tc; /* Go over all PF VPORTs */ for (i = 0; i < num_vports; i++) { /* Each VPORT can have several VPORT PQ IDs for various TCs */ for (tc = 0; tc < NUM_OF_TCS; tc++) { /* Check if VPORT/TC is valid */ vport_pq_id = vport_params[i].first_tx_pq_id[tc]; if (vport_pq_id == QM_INVALID_PQ_ID) continue; /* Find WFQ weight (per VPORT or per VPORT+TC) */ wfq = vport_params[i].wfq; wfq = wfq ? wfq : vport_params[i].tc_wfq[tc]; inc_val = QM_VP_WFQ_INC_VAL(wfq); if (inc_val > QM_VP_WFQ_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration\n"); return -1; } /* Config registers */ STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET + vport_pq_id, (u32)QM_WFQ_CRD_REG_SIGN_BIT); STORE_RT_REG(p_hwfn, QM_REG_WFQVPUPPERBOUND_RT_OFFSET + vport_pq_id, inc_val | QM_WFQ_CRD_REG_SIGN_BIT); STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET + vport_pq_id, inc_val); } } return 0; } static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 reg_val, i; for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val; i++) { udelay(QM_STOP_CMD_POLL_PERIOD_US); reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY); } /* Check if timeout while waiting for SDM command ready */ if (i == QM_STOP_CMD_MAX_POLL_COUNT) { DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Timeout when waiting for QM SDM command ready signal\n"); return false; } return true; } static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb) { if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt)) return false; qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr); qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb); qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb); qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1); qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0); return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt); } /******************** INTERFACE IMPLEMENTATION *********************/ u32 qed_qm_pf_mem_size(u32 num_pf_cids, u32 num_vf_cids, u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs) { return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs + QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs + QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF; } int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn, struct qed_qm_common_rt_init_params *p_params) { u32 mask = 0; /* Init AFullOprtnstcCrdMask */ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ, QM_OPPOR_LINE_VOQ_DEF); SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN); SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, p_params->pf_wfq_en ? 1 : 0); SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, p_params->vport_wfq_en ? 1 : 0); SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, p_params->pf_rl_en ? 1 : 0); SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL, p_params->global_rl_en ? 
1 : 0); SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF); SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF); STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask); /* Enable/disable PF RL */ qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en); /* Enable/disable PF WFQ */ qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en); /* Enable/disable global RL */ qed_enable_global_rl(p_hwfn, p_params->global_rl_en); /* Enable/disable VPORT WFQ */ qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en); /* Init PBF CMDQ line credit */ qed_cmdq_lines_rt_init(p_hwfn, p_params->max_ports_per_engine, p_params->max_phys_tcs_per_port, p_params->port_params); /* Init BTB blocks in PBF */ qed_btb_blocks_rt_init(p_hwfn, p_params->max_ports_per_engine, p_params->max_phys_tcs_per_port, p_params->port_params); qed_global_rl_rt_init(p_hwfn); return 0; } int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_qm_pf_rt_init_params *p_params) { struct init_qm_vport_params *vport_params = p_params->vport_params; u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids + p_params->num_tids) * QM_OTHER_PQS_PER_PF; u16 i; u8 tc; /* Clear first Tx PQ ID array for each VPORT */ for (i = 0; i < p_params->num_vports; i++) for (tc = 0; tc < NUM_OF_TCS; tc++) vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID; /* Map Other PQs (if any) */ qed_other_pq_map_rt_init(p_hwfn, p_params->pf_id, p_params->is_pf_loading, p_params->num_pf_cids, p_params->num_tids, 0); /* Map Tx PQs */ if (qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb)) return -1; /* Init PF WFQ */ if (p_params->pf_wfq) if (qed_pf_wfq_rt_init(p_hwfn, p_params)) return -1; /* Init PF RL */ if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl)) return -1; /* Init VPORT WFQ */ if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params)) return -1; /* Set VPORT RL */ if (qed_vport_rl_rt_init(p_hwfn, p_params->start_rl, p_params->num_rls, p_params->link_speed, p_params->rl_params)) return -1; return 0; } int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq) { u32 inc_val = QM_PF_WFQ_INC_VAL(pf_wfq); if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n"); return -1; } qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val); return 0; } int qed_init_pf_rl(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl) { u32 inc_val = QM_RL_INC_VAL(pf_rl); if (inc_val > QM_PF_RL_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n"); return -1; } qed_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT); qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val); return 0; } int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq) { int result = 0; u16 vport_pq_id; u8 tc; for (tc = 0; tc < NUM_OF_TCS && !result; tc++) { vport_pq_id = first_tx_pq_id[tc]; if (vport_pq_id != QM_INVALID_PQ_ID) result = qed_init_vport_tc_wfq(p_hwfn, p_ptt, vport_pq_id, wfq); } return result; } int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 first_tx_pq_id, u16 wfq) { u32 inc_val; if (first_tx_pq_id == QM_INVALID_PQ_ID) return -1; inc_val = QM_VP_WFQ_INC_VAL(wfq); if (!inc_val || inc_val > QM_VP_WFQ_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n"); return -1; } qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPCRD + first_tx_pq_id * 4, 
(u32)QM_WFQ_CRD_REG_SIGN_BIT); qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPUPPERBOUND + first_tx_pq_id * 4, inc_val | QM_WFQ_CRD_REG_SIGN_BIT); qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + first_tx_pq_id * 4, inc_val); return 0; } int qed_init_global_rl(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit, enum init_qm_rl_type vport_rl_type) { u32 inc_val, upper_bound; upper_bound = (vport_rl_type == QM_RL_TYPE_QCN) ? QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) : QM_INITIAL_VOQ_BYTE_CRD; inc_val = QM_RL_INC_VAL(rate_limit); if (inc_val > upper_bound) { DP_NOTICE(p_hwfn, "Invalid VPORT rate limit configuration.\n"); return -1; } qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT); qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLUPPERBOUND + rl_id * 4, upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT); qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val); return 0; } bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool is_release_cmd, bool is_tx_pq, u16 start_pq, u16 num_pqs) { u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 }; u32 pq_mask = 0, last_pq, pq_id; last_pq = start_pq + num_pqs - 1; /* Set command's PQ type */ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1); /* Go over requested PQs */ for (pq_id = start_pq; pq_id <= last_pq; pq_id++) { /* Set PQ bit in mask (stop command only) */ if (!is_release_cmd) pq_mask |= BIT((pq_id % QM_STOP_PQ_MASK_WIDTH)); /* If last PQ or end of PQ mask, write command */ if ((pq_id == last_pq) || (pq_id % QM_STOP_PQ_MASK_WIDTH == (QM_STOP_PQ_MASK_WIDTH - 1))) { QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK, pq_mask); QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID, pq_id / QM_STOP_PQ_MASK_WIDTH); if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0], cmd_arr[1])) return false; pq_mask = 0; } } return true; } #define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \ do { \ typeof(var) *__p_var = &(var); \ typeof(offset) __offset = offset; \ *__p_var = (*__p_var & ~BIT(__offset)) | \ ((enable) ? BIT(__offset) : 0); \ } while (0) #define PRS_ETH_TUNN_OUTPUT_FORMAT 0xF4DAB910 #define PRS_ETH_OUTPUT_FORMAT 0xFFFF4910 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \ do { \ u32 i; \ \ for (i = 0; i < (arr_size); i++) \ qed_wr(dev, ptt, \ ((addr) + (4 * i)), \ ((u32 *)&(arr))[i]); \ } while (0) /** * qed_dmae_to_grc() - Internal function for writing from host to * wide-bus registers (split registers are not supported yet). * * @p_hwfn: HW device data. * @p_ptt: PTT window used for writing the registers. * @p_data: Pointer to source data. * @addr: Destination register address. * @len_in_dwords: Data length in dwords (u32). * * Return: Length of the written data in dwords (u32) or -1 on invalid * input. 
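* If the DMAE transfer fails, the buffer is swapped to CPU byte order and written dword-by-dword over GRC as a fallback.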
*/ static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, __le32 *p_data, u32 addr, u32 len_in_dwords) { struct qed_dmae_params params = { 0 }; u32 *data_cpu; int rc; if (!p_data) return -1; /* Set DMAE params */ SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1); /* Execute DMAE command */ rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)(p_data), addr, len_in_dwords, &params); /* If not read using DMAE, read using GRC */ if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Failed writing to chip using DMAE, using GRC instead\n"); /* Swap to CPU byteorder and write to registers using GRC */ data_cpu = (__force u32 *)p_data; le32_to_cpu_array(data_cpu, len_in_dwords); ARR_REG_WR(p_hwfn, p_ptt, addr, data_cpu, len_in_dwords); cpu_to_le32_array(data_cpu, len_in_dwords); } return len_in_dwords; } void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 dest_port) { /* Update PRS register */ qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port); /* Update NIG register */ qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port); /* Update PBF register */ qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port); } void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool vxlan_enable) { u32 reg_val; u8 shift; /* Update PRS register */ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); SET_FIELD(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE, vxlan_enable); qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); if (reg_val) { reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0); /* Update output only if tunnel blocks not included. */ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); } /* Update NIG register */ reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE); shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT; SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable); qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val); /* Update DORQ register */ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0); } void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool eth_gre_enable, bool ip_gre_enable) { u32 reg_val; u8 shift; /* Update PRS register */ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); SET_FIELD(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE, eth_gre_enable); SET_FIELD(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE, ip_gre_enable); qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); if (reg_val) { reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0); /* Update output only if tunnel blocks not included. */ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); } /* Update NIG register */ reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE); shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT; SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable); shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT; SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable); qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val); /* Update DORQ registers */ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0); qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 
1 : 0); } void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 dest_port) { /* Update PRS register */ qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port); /* Update NIG register */ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port); /* Update PBF register */ qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port); } void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool eth_geneve_enable, bool ip_geneve_enable) { u32 reg_val; /* Update PRS register */ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); SET_FIELD(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE, eth_geneve_enable); SET_FIELD(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE, ip_geneve_enable); qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); if (reg_val) { reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0); /* Update output only if tunnel blocks not included. */ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); } /* Update NIG register */ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE, eth_geneve_enable ? 1 : 0); qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0); /* EDPM with geneve tunnel not supported in BB */ if (QED_IS_BB_B0(p_hwfn->cdev)) return; /* Update DORQ registers */ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2, eth_geneve_enable ? 1 : 0); qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2, ip_geneve_enable ? 1 : 0); } #define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 3 #define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT 0xC8DAB910 void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool enable) { u32 reg_val, cfg_mask; /* read PRS config register */ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO); /* set VXLAN_NO_L2_ENABLE mask */ cfg_mask = BIT(PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET); if (enable) { /* set VXLAN_NO_L2_ENABLE flag */ reg_val |= cfg_mask; /* update PRS FIC register */ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT); } else { /* clear VXLAN_NO_L2_ENABLE flag */ reg_val &= ~cfg_mask; } /* write PRS config register */ qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val); } #define T_ETH_PACKET_ACTION_GFT_EVENTID 23 #define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272 #define T_ETH_PACKET_MATCH_RFS_EVENTID 25 #define PARSER_ETH_CONN_CM_HDR 0 #define CAM_LINE_SIZE sizeof(u32) #define RAM_LINE_SIZE sizeof(u64) #define REG_SIZE sizeof(u32) void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id) { struct regpair ram_line = { 0 }; /* Disable gft search for PF */ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0); /* Clean ram & cam for next gft session */ /* Zero camline */ qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0); /* Zero ramline */ qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, sizeof(ram_line) / REG_SIZE); } void qed_gft_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id, bool tcp, bool udp, bool ipv4, bool ipv6, enum gft_profile_type profile_type) { struct regpair ram_line; u32 search_non_ip_as_gft; u32 reg_val, cam_line; u32 lo = 0, hi = 0; if (!ipv6 && !ipv4) DP_NOTICE(p_hwfn, "gft_config: must accept at least on of - ipv4 or ipv6'\n"); if (!tcp && !udp) DP_NOTICE(p_hwfn, "gft_config: must accept at least on of - udp or tcp\n"); if (profile_type >= MAX_GFT_PROFILE_TYPE) DP_NOTICE(p_hwfn, "gft_config: unsupported gft_profile_type\n"); /* 
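The sequence below programs the RFS CM header, a per-PF CAM line for the protocol/IP-version match, and a profile-mask RAM line selecting the fields to hash on, then enables the GFT search.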
Set RFS event ID to be awakened i Tstorm By Prs */ reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID << PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT; reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT; qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val); /* Do not load context only cid in PRS on match. */ qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0); /* Do not use tenant ID exist bit for gft search */ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0); /* Set Cam */ cam_line = 0; SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1); /* Filters are per PF!! */ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK, GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK); SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id); if (!(tcp && udp)) { SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK); if (tcp) SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_TCP_PROTOCOL); else SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_UDP_PROTOCOL); } if (!(ipv4 && ipv6)) { SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1); if (ipv4) SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV4); else SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV6); } /* Write characteristics to cam */ qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, cam_line); cam_line = qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id); /* Write line to RAM - compare to filter 4 tuple */ /* Search no IP as GFT */ search_non_ip_as_gft = 0; /* Tunnel type */ SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1); SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1); if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) { SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1); SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1); SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); SET_FIELD(lo, GFT_RAM_LINE_SRC_PORT, 1); SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1); } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) { SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1); } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) { SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1); SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) { SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1); SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) { SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1); /* Allow tunneled traffic without inner IP */ search_non_ip_as_gft = 1; } ram_line.lo = cpu_to_le32(lo); ram_line.hi = cpu_to_le32(hi); qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft); qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, sizeof(ram_line) / REG_SIZE); /* Set default profile so that no filter match will happen */ ram_line.lo = cpu_to_le32(0xffffffff); ram_line.hi = cpu_to_le32(0x3ff); qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH, sizeof(ram_line) / REG_SIZE); /* Enable gft search */ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1); } DECLARE_CRC8_TABLE(cdu_crc8_table); /* Calculate and return CDU validation byte per connection type/region/cid */ static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid) { const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG; u8 crc, validation_byte = 0; static u8 
crc8_table_valid; /* automatically initialized to 0 */ u32 validation_string = 0; __be32 data_to_crc; if (!crc8_table_valid) { crc8_populate_msb(cdu_crc8_table, 0x07); crc8_table_valid = 1; } /* The CRC is calculated on the String-to-compress: * [31:8] = {CID[31:20],CID[11:0]} * [7:4] = Region * [3:0] = Type */ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1) validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8); if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1) validation_string |= ((region & 0xF) << 4); if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1) validation_string |= (conn_type & 0xF); /* Convert to big-endian and calculate CRC8 */ data_to_crc = cpu_to_be32(validation_string); crc = crc8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE); /* The validation byte [7:0] is composed: * for type A validation * [7] = active configuration bit * [6:0] = crc[6:0] * * for type B validation * [7] = active configuration bit * [6:3] = connection_type[3:0] * [2:0] = crc[2:0] */ validation_byte |= ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7; if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1) validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7); else validation_byte |= crc & 0x7F; return validation_byte; } /* Calcualte and set validation bytes for session context */ void qed_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type, u32 cid) { u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx; p_ctx = (u8 * const)p_ctx_mem; x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]]; t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]]; u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]]; memset(p_ctx, 0, ctx_size); *x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid); *t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid); *u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid); } /* Calcualte and set validation bytes for task context */ void qed_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type, u32 tid) { u8 *p_ctx, *region1_val_ptr; p_ctx = (u8 * const)p_ctx_mem; region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]]; memset(p_ctx, 0, ctx_size); *region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid); } /* Memset session context to 0 while preserving validation bytes */ void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type) { u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx; u8 x_val, t_val, u_val; p_ctx = (u8 * const)p_ctx_mem; x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]]; t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]]; u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]]; x_val = *x_val_ptr; t_val = *t_val_ptr; u_val = *u_val_ptr; memset(p_ctx, 0, ctx_size); *x_val_ptr = x_val; *t_val_ptr = t_val; *u_val_ptr = u_val; } /* Memset task context to 0 while preserving validation bytes */ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type) { u8 *p_ctx, *region1_val_ptr; u8 region1_val; p_ctx = (u8 * const)p_ctx_mem; region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]]; region1_val = *region1_val_ptr; memset(p_ctx, 0, ctx_size); *region1_val_ptr = region1_val; } /* Enable and configure context validation */ void qed_enable_context_validation(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 ctx_validation; /* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24; qed_wr(p_hwfn, p_ptt, 
CDU_REG_CCFC_CTX_VALID0, ctx_validation); /* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8; qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation); /* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8; qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation); } const char *qed_get_protocol_type_str(u32 protocol_type) { if (protocol_type >= ARRAY_SIZE(s_protocol_types)) return "Invalid protocol type"; return s_protocol_types[protocol_type]; } const char *qed_get_ramrod_cmd_id_str(u32 protocol_type, u32 ramrod_cmd_id) { const char *ramrod_cmd_id_str; if (protocol_type >= ARRAY_SIZE(s_ramrod_cmd_ids)) return "Invalid protocol type"; if (ramrod_cmd_id >= ARRAY_SIZE(s_ramrod_cmd_ids[0])) return "Invalid Ramrod command ID"; ramrod_cmd_id_str = s_ramrod_cmd_ids[protocol_type][ramrod_cmd_id]; if (!ramrod_cmd_id_str) return "Invalid Ramrod command ID"; return ramrod_cmd_id_str; } static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id) { switch (storm_id) { case 0: return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); case 1: return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); case 2: return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); case 3: return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); case 4: return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); case 5: return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); default: return 0; } } void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 assert_level[NUM_STORMS]) { u8 storm_id; for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) { u32 ram_addr = qed_get_rdma_assert_ram_addr(p_hwfn, storm_id); qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]); } } #define PHYS_ADDR_DWORDS DIV_ROUND_UP(sizeof(dma_addr_t), 4) #define OVERLAY_HDR_SIZE_DWORDS (sizeof(struct fw_overlay_buf_hdr) / 4) static u32 qed_get_overlay_addr_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id) { switch (storm_id) { case 0: return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + TSTORM_OVERLAY_BUF_ADDR_OFFSET; case 1: return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + MSTORM_OVERLAY_BUF_ADDR_OFFSET; case 2: return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + USTORM_OVERLAY_BUF_ADDR_OFFSET; case 3: return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + XSTORM_OVERLAY_BUF_ADDR_OFFSET; case 4: return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + YSTORM_OVERLAY_BUF_ADDR_OFFSET; case 5: return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + PSTORM_OVERLAY_BUF_ADDR_OFFSET; default: return 0; } } struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn, const u32 * const fw_overlay_in_buf, u32 buf_size_in_bytes) { u32 buf_size = buf_size_in_bytes / sizeof(u32), buf_offset = 0; struct phys_mem_desc *allocated_mem; if (!buf_size) return NULL; allocated_mem = kcalloc(NUM_STORMS, sizeof(struct phys_mem_desc), GFP_KERNEL); if (!allocated_mem) return NULL; /* For each Storm, set physical address in RAM */ while (buf_offset < buf_size) { struct phys_mem_desc *storm_mem_desc; struct fw_overlay_buf_hdr *hdr; u32 storm_buf_size; u8 storm_id; hdr = (struct fw_overlay_buf_hdr 
*)&fw_overlay_in_buf[buf_offset]; storm_buf_size = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_BUF_SIZE); storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID); if (storm_id >= NUM_STORMS) break; storm_mem_desc = allocated_mem + storm_id; storm_mem_desc->size = storm_buf_size * sizeof(u32); /* Allocate physical memory for Storm's overlays buffer */ storm_mem_desc->virt_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, storm_mem_desc->size, &storm_mem_desc->phys_addr, GFP_KERNEL); if (!storm_mem_desc->virt_addr) break; /* Skip overlays buffer header */ buf_offset += OVERLAY_HDR_SIZE_DWORDS; /* Copy Storm's overlays buffer to allocated memory */ memcpy(storm_mem_desc->virt_addr, &fw_overlay_in_buf[buf_offset], storm_mem_desc->size); /* Advance to next Storm */ buf_offset += storm_buf_size; } /* If memory allocation has failed, free all allocated memory */ if (buf_offset < buf_size) { qed_fw_overlay_mem_free(p_hwfn, &allocated_mem); return NULL; } return allocated_mem; } void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct phys_mem_desc *fw_overlay_mem) { u8 storm_id; for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) { struct phys_mem_desc *storm_mem_desc = (struct phys_mem_desc *)fw_overlay_mem + storm_id; u32 ram_addr, i; /* Skip Storms with no FW overlays */ if (!storm_mem_desc->virt_addr) continue; /* Calculate overlay RAM GRC address of current PF */ ram_addr = qed_get_overlay_addr_ram_addr(p_hwfn, storm_id) + sizeof(dma_addr_t) * p_hwfn->rel_pf_id; /* Write Storm's overlay physical address to RAM */ for (i = 0; i < PHYS_ADDR_DWORDS; i++, ram_addr += sizeof(u32)) qed_wr(p_hwfn, p_ptt, ram_addr, ((u32 *)&storm_mem_desc->phys_addr)[i]); } } void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, struct phys_mem_desc **fw_overlay_mem) { u8 storm_id; if (!fw_overlay_mem || !(*fw_overlay_mem)) return; for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) { struct phys_mem_desc *storm_mem_desc = (struct phys_mem_desc *)*fw_overlay_mem + storm_id; /* Free Storm's physical memory */ if (storm_mem_desc->virt_addr) dma_free_coherent(&p_hwfn->cdev->pdev->dev, storm_mem_desc->size, storm_mem_desc->virt_addr, storm_mem_desc->phys_addr); } /* Free allocated virtual memory */ kfree(*fw_overlay_mem); *fw_overlay_mem = NULL; }
linux-master
drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
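A minimal standalone sketch of the CDU context-validation byte computed by qed_calc_cdu_validation_byte() in qed_init_fw_funcs.c above. It assumes the default configuration uses the CID, region, type and active bits with type-A validation, and it uses a bitwise MSB-first CRC-8 (polynomial 0x07, init 0xFF) in place of the kernel's crc8 table; this should be equivalent to crc8_populate_msb() with the same polynomial, but treat it as an illustration rather than a byte-exact reimplementation.

/* Standalone sketch (not driver code): CDU "string to compress" + type-A byte. */
#include <stdint.h>
#include <stdio.h>

static uint8_t crc8_msb(const uint8_t *buf, int len)
{
    uint8_t crc = 0xFF;                 /* CRC8_INIT_VALUE */

    for (int i = 0; i < len; i++) {
        crc ^= buf[i];
        for (int bit = 0; bit < 8; bit++)
            crc = (crc & 0x80) ? (uint8_t)((crc << 1) ^ 0x07)
                               : (uint8_t)(crc << 1);
    }
    return crc;
}

int main(void)
{
    uint32_t cid = 0x12345;             /* illustrative CID */
    uint8_t conn_type = 4, region = 3;  /* illustrative type/region */

    /* [31:8] = {CID[31:20], CID[11:0]}, [7:4] = region, [3:0] = type */
    uint32_t s = (cid & 0xFFF00000) | ((cid & 0xFFF) << 8) |
                 ((uint32_t)(region & 0xF) << 4) | (conn_type & 0xF);

    /* CRC runs over the big-endian byte order of the string */
    uint8_t be[4] = { (uint8_t)(s >> 24), (uint8_t)(s >> 16),
                      (uint8_t)(s >> 8), (uint8_t)s };
    uint8_t crc = crc8_msb(be, 4);

    /* Type A: bit 7 = active-configuration bit, bits 6:0 = crc[6:0] */
    printf("validation byte = 0x%02x\n", (uint8_t)(0x80 | (crc & 0x7F)));
    return 0;
}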
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/types.h> #include <asm/byteorder.h> #include <linux/bug.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/vmalloc.h> #include "qed.h" #include "qed_hw.h" #include "qed_mcp.h" #include "qed_reg_addr.h" #define TLV_TYPE(p) (p[0]) #define TLV_LENGTH(p) (p[1]) #define TLV_FLAGS(p) (p[3]) #define QED_TLV_DATA_MAX (14) struct qed_tlv_parsed_buf { /* To be filled with the address to set in Value field */ void *p_val; /* To be used internally in case the value has to be modified */ u8 data[QED_TLV_DATA_MAX]; }; static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group) { switch (tlv_type) { case DRV_TLV_FEATURE_FLAGS: case DRV_TLV_LOCAL_ADMIN_ADDR: case DRV_TLV_ADDITIONAL_MAC_ADDR_1: case DRV_TLV_ADDITIONAL_MAC_ADDR_2: case DRV_TLV_OS_DRIVER_STATES: case DRV_TLV_PXE_BOOT_PROGRESS: case DRV_TLV_RX_FRAMES_RECEIVED: case DRV_TLV_RX_BYTES_RECEIVED: case DRV_TLV_TX_FRAMES_SENT: case DRV_TLV_TX_BYTES_SENT: case DRV_TLV_NPIV_ENABLED: case DRV_TLV_PCIE_BUS_RX_UTILIZATION: case DRV_TLV_PCIE_BUS_TX_UTILIZATION: case DRV_TLV_DEVICE_CPU_CORES_UTILIZATION: case DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED: case DRV_TLV_NCSI_RX_BYTES_RECEIVED: case DRV_TLV_NCSI_TX_BYTES_SENT: *tlv_group |= QED_MFW_TLV_GENERIC; break; case DRV_TLV_LSO_MAX_OFFLOAD_SIZE: case DRV_TLV_LSO_MIN_SEGMENT_COUNT: case DRV_TLV_PROMISCUOUS_MODE: case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE: case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE: case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG: case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4: case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6: case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: case DRV_TLV_IOV_OFFLOAD: case DRV_TLV_TX_QUEUES_EMPTY: case DRV_TLV_RX_QUEUES_EMPTY: case DRV_TLV_TX_QUEUES_FULL: case DRV_TLV_RX_QUEUES_FULL: *tlv_group |= QED_MFW_TLV_ETH; break; case DRV_TLV_SCSI_TO: case DRV_TLV_R_T_TOV: case DRV_TLV_R_A_TOV: case DRV_TLV_E_D_TOV: case DRV_TLV_CR_TOV: case DRV_TLV_BOOT_TYPE: case DRV_TLV_NPIV_STATE: case DRV_TLV_NUM_OF_NPIV_IDS: case DRV_TLV_SWITCH_NAME: case DRV_TLV_SWITCH_PORT_NUM: case DRV_TLV_SWITCH_PORT_ID: case DRV_TLV_VENDOR_NAME: case DRV_TLV_SWITCH_MODEL: case DRV_TLV_SWITCH_FW_VER: case DRV_TLV_QOS_PRIORITY_PER_802_1P: case DRV_TLV_PORT_ALIAS: case DRV_TLV_PORT_STATE: case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE: case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE: case DRV_TLV_LINK_FAILURE_COUNT: case DRV_TLV_FCOE_BOOT_PROGRESS: case DRV_TLV_RX_BROADCAST_PACKETS: case DRV_TLV_TX_BROADCAST_PACKETS: case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: case DRV_TLV_FCOE_RX_FRAMES_RECEIVED: case DRV_TLV_FCOE_RX_BYTES_RECEIVED: case DRV_TLV_FCOE_TX_FRAMES_SENT: case DRV_TLV_FCOE_TX_BYTES_SENT: case DRV_TLV_CRC_ERROR_COUNT: case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID: case DRV_TLV_CRC_ERROR_1_TIMESTAMP: case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID: case DRV_TLV_CRC_ERROR_2_TIMESTAMP: case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID: case DRV_TLV_CRC_ERROR_3_TIMESTAMP: case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID: case DRV_TLV_CRC_ERROR_4_TIMESTAMP: case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID: case DRV_TLV_CRC_ERROR_5_TIMESTAMP: case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT: case DRV_TLV_LOSS_OF_SIGNAL_ERRORS: case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT: case DRV_TLV_DISPARITY_ERROR_COUNT: case 
DRV_TLV_CODE_VIOLATION_ERROR_COUNT: case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1: case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2: case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3: case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4: case DRV_TLV_LAST_FLOGI_TIMESTAMP: case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1: case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2: case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3: case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4: case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP: case DRV_TLV_LAST_FLOGI_RJT: case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP: case DRV_TLV_FDISCS_SENT_COUNT: case DRV_TLV_FDISC_ACCS_RECEIVED: case DRV_TLV_FDISC_RJTS_RECEIVED: case DRV_TLV_PLOGI_SENT_COUNT: case DRV_TLV_PLOGI_ACCS_RECEIVED: case DRV_TLV_PLOGI_RJTS_RECEIVED: case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID: case DRV_TLV_PLOGI_1_TIMESTAMP: case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID: case DRV_TLV_PLOGI_2_TIMESTAMP: case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID: case DRV_TLV_PLOGI_3_TIMESTAMP: case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID: case DRV_TLV_PLOGI_4_TIMESTAMP: case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID: case DRV_TLV_PLOGI_5_TIMESTAMP: case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID: case DRV_TLV_PLOGI_1_ACC_TIMESTAMP: case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID: case DRV_TLV_PLOGI_2_ACC_TIMESTAMP: case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID: case DRV_TLV_PLOGI_3_ACC_TIMESTAMP: case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID: case DRV_TLV_PLOGI_4_ACC_TIMESTAMP: case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID: case DRV_TLV_PLOGI_5_ACC_TIMESTAMP: case DRV_TLV_LOGOS_ISSUED: case DRV_TLV_LOGO_ACCS_RECEIVED: case DRV_TLV_LOGO_RJTS_RECEIVED: case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID: case DRV_TLV_LOGO_1_TIMESTAMP: case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID: case DRV_TLV_LOGO_2_TIMESTAMP: case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID: case DRV_TLV_LOGO_3_TIMESTAMP: case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID: case DRV_TLV_LOGO_4_TIMESTAMP: case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID: case DRV_TLV_LOGO_5_TIMESTAMP: case DRV_TLV_LOGOS_RECEIVED: case DRV_TLV_ACCS_ISSUED: case DRV_TLV_PRLIS_ISSUED: case DRV_TLV_ACCS_RECEIVED: case DRV_TLV_ABTS_SENT_COUNT: case DRV_TLV_ABTS_ACCS_RECEIVED: case DRV_TLV_ABTS_RJTS_RECEIVED: case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID: case DRV_TLV_ABTS_1_TIMESTAMP: case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID: case DRV_TLV_ABTS_2_TIMESTAMP: case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID: case DRV_TLV_ABTS_3_TIMESTAMP: case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID: case DRV_TLV_ABTS_4_TIMESTAMP: case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID: case DRV_TLV_ABTS_5_TIMESTAMP: case DRV_TLV_RSCNS_RECEIVED: case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1: case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2: case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3: case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4: case DRV_TLV_LUN_RESETS_ISSUED: case DRV_TLV_ABORT_TASK_SETS_ISSUED: case DRV_TLV_TPRLOS_SENT: case DRV_TLV_NOS_SENT_COUNT: case DRV_TLV_NOS_RECEIVED_COUNT: case DRV_TLV_OLS_COUNT: case DRV_TLV_LR_COUNT: case DRV_TLV_LRR_COUNT: case DRV_TLV_LIP_SENT_COUNT: case DRV_TLV_LIP_RECEIVED_COUNT: case DRV_TLV_EOFA_COUNT: case DRV_TLV_EOFNI_COUNT: case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT: case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT: case DRV_TLV_SCSI_STATUS_BUSY_COUNT: case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT: case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT: case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT: case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT: case 
DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT: case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT: case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ: case DRV_TLV_SCSI_CHECK_1_TIMESTAMP: case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ: case DRV_TLV_SCSI_CHECK_2_TIMESTAMP: case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ: case DRV_TLV_SCSI_CHECK_3_TIMESTAMP: case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ: case DRV_TLV_SCSI_CHECK_4_TIMESTAMP: case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ: case DRV_TLV_SCSI_CHECK_5_TIMESTAMP: *tlv_group = QED_MFW_TLV_FCOE; break; case DRV_TLV_TARGET_LLMNR_ENABLED: case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED: case DRV_TLV_DATA_DIGEST_FLAG_ENABLED: case DRV_TLV_AUTHENTICATION_METHOD: case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL: case DRV_TLV_MAX_FRAME_SIZE: case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE: case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE: case DRV_TLV_ISCSI_BOOT_PROGRESS: case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED: case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED: case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT: case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT: *tlv_group |= QED_MFW_TLV_ISCSI; break; default: return -EINVAL; } return 0; } /* Returns size of the data buffer or, -1 in case TLV data is not available. */ static int qed_mfw_get_gen_tlv_value(struct qed_drv_tlv_hdr *p_tlv, struct qed_mfw_tlv_generic *p_drv_buf, struct qed_tlv_parsed_buf *p_buf) { switch (p_tlv->tlv_type) { case DRV_TLV_FEATURE_FLAGS: if (p_drv_buf->flags.b_set) { memset(p_buf->data, 0, sizeof(u8) * QED_TLV_DATA_MAX); p_buf->data[0] = p_drv_buf->flags.ipv4_csum_offload ? 1 : 0; p_buf->data[0] |= (p_drv_buf->flags.lso_supported ? 1 : 0) << 1; p_buf->p_val = p_buf->data; return QED_MFW_TLV_FLAGS_SIZE; } break; case DRV_TLV_LOCAL_ADMIN_ADDR: case DRV_TLV_ADDITIONAL_MAC_ADDR_1: case DRV_TLV_ADDITIONAL_MAC_ADDR_2: { int idx = p_tlv->tlv_type - DRV_TLV_LOCAL_ADMIN_ADDR; if (p_drv_buf->mac_set[idx]) { p_buf->p_val = p_drv_buf->mac[idx]; return ETH_ALEN; } break; } case DRV_TLV_RX_FRAMES_RECEIVED: if (p_drv_buf->rx_frames_set) { p_buf->p_val = &p_drv_buf->rx_frames; return sizeof(p_drv_buf->rx_frames); } break; case DRV_TLV_RX_BYTES_RECEIVED: if (p_drv_buf->rx_bytes_set) { p_buf->p_val = &p_drv_buf->rx_bytes; return sizeof(p_drv_buf->rx_bytes); } break; case DRV_TLV_TX_FRAMES_SENT: if (p_drv_buf->tx_frames_set) { p_buf->p_val = &p_drv_buf->tx_frames; return sizeof(p_drv_buf->tx_frames); } break; case DRV_TLV_TX_BYTES_SENT: if (p_drv_buf->tx_bytes_set) { p_buf->p_val = &p_drv_buf->tx_bytes; return sizeof(p_drv_buf->tx_bytes); } break; default: break; } return -1; } static int qed_mfw_get_eth_tlv_value(struct qed_drv_tlv_hdr *p_tlv, struct qed_mfw_tlv_eth *p_drv_buf, struct qed_tlv_parsed_buf *p_buf) { switch (p_tlv->tlv_type) { case DRV_TLV_LSO_MAX_OFFLOAD_SIZE: if (p_drv_buf->lso_maxoff_size_set) { p_buf->p_val = &p_drv_buf->lso_maxoff_size; return sizeof(p_drv_buf->lso_maxoff_size); } break; case DRV_TLV_LSO_MIN_SEGMENT_COUNT: if (p_drv_buf->lso_minseg_size_set) { p_buf->p_val = &p_drv_buf->lso_minseg_size; return sizeof(p_drv_buf->lso_minseg_size); } break; case DRV_TLV_PROMISCUOUS_MODE: if (p_drv_buf->prom_mode_set) { p_buf->p_val = &p_drv_buf->prom_mode; return sizeof(p_drv_buf->prom_mode); } break; case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE: if (p_drv_buf->tx_descr_size_set) { p_buf->p_val = &p_drv_buf->tx_descr_size; return sizeof(p_drv_buf->tx_descr_size); } break; case 
DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE: if (p_drv_buf->rx_descr_size_set) { p_buf->p_val = &p_drv_buf->rx_descr_size; return sizeof(p_drv_buf->rx_descr_size); } break; case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG: if (p_drv_buf->netq_count_set) { p_buf->p_val = &p_drv_buf->netq_count; return sizeof(p_drv_buf->netq_count); } break; case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4: if (p_drv_buf->tcp4_offloads_set) { p_buf->p_val = &p_drv_buf->tcp4_offloads; return sizeof(p_drv_buf->tcp4_offloads); } break; case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6: if (p_drv_buf->tcp6_offloads_set) { p_buf->p_val = &p_drv_buf->tcp6_offloads; return sizeof(p_drv_buf->tcp6_offloads); } break; case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: if (p_drv_buf->tx_descr_qdepth_set) { p_buf->p_val = &p_drv_buf->tx_descr_qdepth; return sizeof(p_drv_buf->tx_descr_qdepth); } break; case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: if (p_drv_buf->rx_descr_qdepth_set) { p_buf->p_val = &p_drv_buf->rx_descr_qdepth; return sizeof(p_drv_buf->rx_descr_qdepth); } break; case DRV_TLV_IOV_OFFLOAD: if (p_drv_buf->iov_offload_set) { p_buf->p_val = &p_drv_buf->iov_offload; return sizeof(p_drv_buf->iov_offload); } break; case DRV_TLV_TX_QUEUES_EMPTY: if (p_drv_buf->txqs_empty_set) { p_buf->p_val = &p_drv_buf->txqs_empty; return sizeof(p_drv_buf->txqs_empty); } break; case DRV_TLV_RX_QUEUES_EMPTY: if (p_drv_buf->rxqs_empty_set) { p_buf->p_val = &p_drv_buf->rxqs_empty; return sizeof(p_drv_buf->rxqs_empty); } break; case DRV_TLV_TX_QUEUES_FULL: if (p_drv_buf->num_txqs_full_set) { p_buf->p_val = &p_drv_buf->num_txqs_full; return sizeof(p_drv_buf->num_txqs_full); } break; case DRV_TLV_RX_QUEUES_FULL: if (p_drv_buf->num_rxqs_full_set) { p_buf->p_val = &p_drv_buf->num_rxqs_full; return sizeof(p_drv_buf->num_rxqs_full); } break; default: break; } return -1; } static int qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time *p_time, struct qed_tlv_parsed_buf *p_buf) { if (!p_time->b_set) return -1; /* Validate numbers */ if (p_time->month > 12) p_time->month = 0; if (p_time->day > 31) p_time->day = 0; if (p_time->hour > 23) p_time->hour = 0; if (p_time->min > 59) p_time->min = 0; if (p_time->msec > 999) p_time->msec = 0; if (p_time->usec > 999) p_time->usec = 0; memset(p_buf->data, 0, sizeof(u8) * QED_TLV_DATA_MAX); snprintf(p_buf->data, 14, "%d%d%d%d%d%d", p_time->month, p_time->day, p_time->hour, p_time->min, p_time->msec, p_time->usec); p_buf->p_val = p_buf->data; return QED_MFW_TLV_TIME_SIZE; } static int qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv, struct qed_mfw_tlv_fcoe *p_drv_buf, struct qed_tlv_parsed_buf *p_buf) { struct qed_mfw_tlv_time *p_time; u8 idx; switch (p_tlv->tlv_type) { case DRV_TLV_SCSI_TO: if (p_drv_buf->scsi_timeout_set) { p_buf->p_val = &p_drv_buf->scsi_timeout; return sizeof(p_drv_buf->scsi_timeout); } break; case DRV_TLV_R_T_TOV: if (p_drv_buf->rt_tov_set) { p_buf->p_val = &p_drv_buf->rt_tov; return sizeof(p_drv_buf->rt_tov); } break; case DRV_TLV_R_A_TOV: if (p_drv_buf->ra_tov_set) { p_buf->p_val = &p_drv_buf->ra_tov; return sizeof(p_drv_buf->ra_tov); } break; case DRV_TLV_E_D_TOV: if (p_drv_buf->ed_tov_set) { p_buf->p_val = &p_drv_buf->ed_tov; return sizeof(p_drv_buf->ed_tov); } break; case DRV_TLV_CR_TOV: if (p_drv_buf->cr_tov_set) { p_buf->p_val = &p_drv_buf->cr_tov; return sizeof(p_drv_buf->cr_tov); } break; case DRV_TLV_BOOT_TYPE: if (p_drv_buf->boot_type_set) { p_buf->p_val = &p_drv_buf->boot_type; return sizeof(p_drv_buf->boot_type); } break; case DRV_TLV_NPIV_STATE: if (p_drv_buf->npiv_state_set) { p_buf->p_val = 
&p_drv_buf->npiv_state; return sizeof(p_drv_buf->npiv_state); } break; case DRV_TLV_NUM_OF_NPIV_IDS: if (p_drv_buf->num_npiv_ids_set) { p_buf->p_val = &p_drv_buf->num_npiv_ids; return sizeof(p_drv_buf->num_npiv_ids); } break; case DRV_TLV_SWITCH_NAME: if (p_drv_buf->switch_name_set) { p_buf->p_val = &p_drv_buf->switch_name; return sizeof(p_drv_buf->switch_name); } break; case DRV_TLV_SWITCH_PORT_NUM: if (p_drv_buf->switch_portnum_set) { p_buf->p_val = &p_drv_buf->switch_portnum; return sizeof(p_drv_buf->switch_portnum); } break; case DRV_TLV_SWITCH_PORT_ID: if (p_drv_buf->switch_portid_set) { p_buf->p_val = &p_drv_buf->switch_portid; return sizeof(p_drv_buf->switch_portid); } break; case DRV_TLV_VENDOR_NAME: if (p_drv_buf->vendor_name_set) { p_buf->p_val = &p_drv_buf->vendor_name; return sizeof(p_drv_buf->vendor_name); } break; case DRV_TLV_SWITCH_MODEL: if (p_drv_buf->switch_model_set) { p_buf->p_val = &p_drv_buf->switch_model; return sizeof(p_drv_buf->switch_model); } break; case DRV_TLV_SWITCH_FW_VER: if (p_drv_buf->switch_fw_version_set) { p_buf->p_val = &p_drv_buf->switch_fw_version; return sizeof(p_drv_buf->switch_fw_version); } break; case DRV_TLV_QOS_PRIORITY_PER_802_1P: if (p_drv_buf->qos_pri_set) { p_buf->p_val = &p_drv_buf->qos_pri; return sizeof(p_drv_buf->qos_pri); } break; case DRV_TLV_PORT_ALIAS: if (p_drv_buf->port_alias_set) { p_buf->p_val = &p_drv_buf->port_alias; return sizeof(p_drv_buf->port_alias); } break; case DRV_TLV_PORT_STATE: if (p_drv_buf->port_state_set) { p_buf->p_val = &p_drv_buf->port_state; return sizeof(p_drv_buf->port_state); } break; case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE: if (p_drv_buf->fip_tx_descr_size_set) { p_buf->p_val = &p_drv_buf->fip_tx_descr_size; return sizeof(p_drv_buf->fip_tx_descr_size); } break; case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE: if (p_drv_buf->fip_rx_descr_size_set) { p_buf->p_val = &p_drv_buf->fip_rx_descr_size; return sizeof(p_drv_buf->fip_rx_descr_size); } break; case DRV_TLV_LINK_FAILURE_COUNT: if (p_drv_buf->link_failures_set) { p_buf->p_val = &p_drv_buf->link_failures; return sizeof(p_drv_buf->link_failures); } break; case DRV_TLV_FCOE_BOOT_PROGRESS: if (p_drv_buf->fcoe_boot_progress_set) { p_buf->p_val = &p_drv_buf->fcoe_boot_progress; return sizeof(p_drv_buf->fcoe_boot_progress); } break; case DRV_TLV_RX_BROADCAST_PACKETS: if (p_drv_buf->rx_bcast_set) { p_buf->p_val = &p_drv_buf->rx_bcast; return sizeof(p_drv_buf->rx_bcast); } break; case DRV_TLV_TX_BROADCAST_PACKETS: if (p_drv_buf->tx_bcast_set) { p_buf->p_val = &p_drv_buf->tx_bcast; return sizeof(p_drv_buf->tx_bcast); } break; case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: if (p_drv_buf->fcoe_txq_depth_set) { p_buf->p_val = &p_drv_buf->fcoe_txq_depth; return sizeof(p_drv_buf->fcoe_txq_depth); } break; case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: if (p_drv_buf->fcoe_rxq_depth_set) { p_buf->p_val = &p_drv_buf->fcoe_rxq_depth; return sizeof(p_drv_buf->fcoe_rxq_depth); } break; case DRV_TLV_FCOE_RX_FRAMES_RECEIVED: if (p_drv_buf->fcoe_rx_frames_set) { p_buf->p_val = &p_drv_buf->fcoe_rx_frames; return sizeof(p_drv_buf->fcoe_rx_frames); } break; case DRV_TLV_FCOE_RX_BYTES_RECEIVED: if (p_drv_buf->fcoe_rx_bytes_set) { p_buf->p_val = &p_drv_buf->fcoe_rx_bytes; return sizeof(p_drv_buf->fcoe_rx_bytes); } break; case DRV_TLV_FCOE_TX_FRAMES_SENT: if (p_drv_buf->fcoe_tx_frames_set) { p_buf->p_val = &p_drv_buf->fcoe_tx_frames; return sizeof(p_drv_buf->fcoe_tx_frames); } break; case DRV_TLV_FCOE_TX_BYTES_SENT: if (p_drv_buf->fcoe_tx_bytes_set) { p_buf->p_val = 
&p_drv_buf->fcoe_tx_bytes; return sizeof(p_drv_buf->fcoe_tx_bytes); } break; case DRV_TLV_CRC_ERROR_COUNT: if (p_drv_buf->crc_count_set) { p_buf->p_val = &p_drv_buf->crc_count; return sizeof(p_drv_buf->crc_count); } break; case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID: case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID: case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID: case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID: case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID: idx = (p_tlv->tlv_type - DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID) / 2; if (p_drv_buf->crc_err_src_fcid_set[idx]) { p_buf->p_val = &p_drv_buf->crc_err_src_fcid[idx]; return sizeof(p_drv_buf->crc_err_src_fcid[idx]); } break; case DRV_TLV_CRC_ERROR_1_TIMESTAMP: case DRV_TLV_CRC_ERROR_2_TIMESTAMP: case DRV_TLV_CRC_ERROR_3_TIMESTAMP: case DRV_TLV_CRC_ERROR_4_TIMESTAMP: case DRV_TLV_CRC_ERROR_5_TIMESTAMP: idx = (p_tlv->tlv_type - DRV_TLV_CRC_ERROR_1_TIMESTAMP) / 2; return qed_mfw_get_tlv_time_value(&p_drv_buf->crc_err[idx], p_buf); case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT: if (p_drv_buf->losync_err_set) { p_buf->p_val = &p_drv_buf->losync_err; return sizeof(p_drv_buf->losync_err); } break; case DRV_TLV_LOSS_OF_SIGNAL_ERRORS: if (p_drv_buf->losig_err_set) { p_buf->p_val = &p_drv_buf->losig_err; return sizeof(p_drv_buf->losig_err); } break; case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT: if (p_drv_buf->primtive_err_set) { p_buf->p_val = &p_drv_buf->primtive_err; return sizeof(p_drv_buf->primtive_err); } break; case DRV_TLV_DISPARITY_ERROR_COUNT: if (p_drv_buf->disparity_err_set) { p_buf->p_val = &p_drv_buf->disparity_err; return sizeof(p_drv_buf->disparity_err); } break; case DRV_TLV_CODE_VIOLATION_ERROR_COUNT: if (p_drv_buf->code_violation_err_set) { p_buf->p_val = &p_drv_buf->code_violation_err; return sizeof(p_drv_buf->code_violation_err); } break; case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1: case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2: case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3: case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4: idx = p_tlv->tlv_type - DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1; if (p_drv_buf->flogi_param_set[idx]) { p_buf->p_val = &p_drv_buf->flogi_param[idx]; return sizeof(p_drv_buf->flogi_param[idx]); } break; case DRV_TLV_LAST_FLOGI_TIMESTAMP: return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_tstamp, p_buf); case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1: case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2: case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3: case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4: idx = p_tlv->tlv_type - DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1; if (p_drv_buf->flogi_acc_param_set[idx]) { p_buf->p_val = &p_drv_buf->flogi_acc_param[idx]; return sizeof(p_drv_buf->flogi_acc_param[idx]); } break; case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP: return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_acc_tstamp, p_buf); case DRV_TLV_LAST_FLOGI_RJT: if (p_drv_buf->flogi_rjt_set) { p_buf->p_val = &p_drv_buf->flogi_rjt; return sizeof(p_drv_buf->flogi_rjt); } break; case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP: return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_rjt_tstamp, p_buf); case DRV_TLV_FDISCS_SENT_COUNT: if (p_drv_buf->fdiscs_set) { p_buf->p_val = &p_drv_buf->fdiscs; return sizeof(p_drv_buf->fdiscs); } break; case DRV_TLV_FDISC_ACCS_RECEIVED: if (p_drv_buf->fdisc_acc_set) { p_buf->p_val = &p_drv_buf->fdisc_acc; return sizeof(p_drv_buf->fdisc_acc); } break; case DRV_TLV_FDISC_RJTS_RECEIVED: if (p_drv_buf->fdisc_rjt_set) { p_buf->p_val = 
&p_drv_buf->fdisc_rjt; return sizeof(p_drv_buf->fdisc_rjt); } break; case DRV_TLV_PLOGI_SENT_COUNT: if (p_drv_buf->plogi_set) { p_buf->p_val = &p_drv_buf->plogi; return sizeof(p_drv_buf->plogi); } break; case DRV_TLV_PLOGI_ACCS_RECEIVED: if (p_drv_buf->plogi_acc_set) { p_buf->p_val = &p_drv_buf->plogi_acc; return sizeof(p_drv_buf->plogi_acc); } break; case DRV_TLV_PLOGI_RJTS_RECEIVED: if (p_drv_buf->plogi_rjt_set) { p_buf->p_val = &p_drv_buf->plogi_rjt; return sizeof(p_drv_buf->plogi_rjt); } break; case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID: case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID: case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID: case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID: case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID: idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID) / 2; if (p_drv_buf->plogi_dst_fcid_set[idx]) { p_buf->p_val = &p_drv_buf->plogi_dst_fcid[idx]; return sizeof(p_drv_buf->plogi_dst_fcid[idx]); } break; case DRV_TLV_PLOGI_1_TIMESTAMP: case DRV_TLV_PLOGI_2_TIMESTAMP: case DRV_TLV_PLOGI_3_TIMESTAMP: case DRV_TLV_PLOGI_4_TIMESTAMP: case DRV_TLV_PLOGI_5_TIMESTAMP: idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_TIMESTAMP) / 2; return qed_mfw_get_tlv_time_value(&p_drv_buf->plogi_tstamp[idx], p_buf); case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID: case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID: case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID: case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID: case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID: idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID) / 2; if (p_drv_buf->plogi_acc_src_fcid_set[idx]) { p_buf->p_val = &p_drv_buf->plogi_acc_src_fcid[idx]; return sizeof(p_drv_buf->plogi_acc_src_fcid[idx]); } break; case DRV_TLV_PLOGI_1_ACC_TIMESTAMP: case DRV_TLV_PLOGI_2_ACC_TIMESTAMP: case DRV_TLV_PLOGI_3_ACC_TIMESTAMP: case DRV_TLV_PLOGI_4_ACC_TIMESTAMP: case DRV_TLV_PLOGI_5_ACC_TIMESTAMP: idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_ACC_TIMESTAMP) / 2; p_time = &p_drv_buf->plogi_acc_tstamp[idx]; return qed_mfw_get_tlv_time_value(p_time, p_buf); case DRV_TLV_LOGOS_ISSUED: if (p_drv_buf->tx_plogos_set) { p_buf->p_val = &p_drv_buf->tx_plogos; return sizeof(p_drv_buf->tx_plogos); } break; case DRV_TLV_LOGO_ACCS_RECEIVED: if (p_drv_buf->plogo_acc_set) { p_buf->p_val = &p_drv_buf->plogo_acc; return sizeof(p_drv_buf->plogo_acc); } break; case DRV_TLV_LOGO_RJTS_RECEIVED: if (p_drv_buf->plogo_rjt_set) { p_buf->p_val = &p_drv_buf->plogo_rjt; return sizeof(p_drv_buf->plogo_rjt); } break; case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID: case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID: case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID: case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID: case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID: idx = (p_tlv->tlv_type - DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID) / 2; if (p_drv_buf->plogo_src_fcid_set[idx]) { p_buf->p_val = &p_drv_buf->plogo_src_fcid[idx]; return sizeof(p_drv_buf->plogo_src_fcid[idx]); } break; case DRV_TLV_LOGO_1_TIMESTAMP: case DRV_TLV_LOGO_2_TIMESTAMP: case DRV_TLV_LOGO_3_TIMESTAMP: case DRV_TLV_LOGO_4_TIMESTAMP: case DRV_TLV_LOGO_5_TIMESTAMP: idx = (p_tlv->tlv_type - DRV_TLV_LOGO_1_TIMESTAMP) / 2; return qed_mfw_get_tlv_time_value(&p_drv_buf->plogo_tstamp[idx], p_buf); case DRV_TLV_LOGOS_RECEIVED: if (p_drv_buf->rx_logos_set) { p_buf->p_val = &p_drv_buf->rx_logos; return sizeof(p_drv_buf->rx_logos); } break; case DRV_TLV_ACCS_ISSUED: if (p_drv_buf->tx_accs_set) { p_buf->p_val = &p_drv_buf->tx_accs; return sizeof(p_drv_buf->tx_accs); } break; case DRV_TLV_PRLIS_ISSUED: if (p_drv_buf->tx_prlis_set) { p_buf->p_val = 
&p_drv_buf->tx_prlis; return sizeof(p_drv_buf->tx_prlis); } break; case DRV_TLV_ACCS_RECEIVED: if (p_drv_buf->rx_accs_set) { p_buf->p_val = &p_drv_buf->rx_accs; return sizeof(p_drv_buf->rx_accs); } break; case DRV_TLV_ABTS_SENT_COUNT: if (p_drv_buf->tx_abts_set) { p_buf->p_val = &p_drv_buf->tx_abts; return sizeof(p_drv_buf->tx_abts); } break; case DRV_TLV_ABTS_ACCS_RECEIVED: if (p_drv_buf->rx_abts_acc_set) { p_buf->p_val = &p_drv_buf->rx_abts_acc; return sizeof(p_drv_buf->rx_abts_acc); } break; case DRV_TLV_ABTS_RJTS_RECEIVED: if (p_drv_buf->rx_abts_rjt_set) { p_buf->p_val = &p_drv_buf->rx_abts_rjt; return sizeof(p_drv_buf->rx_abts_rjt); } break; case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID: case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID: case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID: case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID: case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID: idx = (p_tlv->tlv_type - DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID) / 2; if (p_drv_buf->abts_dst_fcid_set[idx]) { p_buf->p_val = &p_drv_buf->abts_dst_fcid[idx]; return sizeof(p_drv_buf->abts_dst_fcid[idx]); } break; case DRV_TLV_ABTS_1_TIMESTAMP: case DRV_TLV_ABTS_2_TIMESTAMP: case DRV_TLV_ABTS_3_TIMESTAMP: case DRV_TLV_ABTS_4_TIMESTAMP: case DRV_TLV_ABTS_5_TIMESTAMP: idx = (p_tlv->tlv_type - DRV_TLV_ABTS_1_TIMESTAMP) / 2; return qed_mfw_get_tlv_time_value(&p_drv_buf->abts_tstamp[idx], p_buf); case DRV_TLV_RSCNS_RECEIVED: if (p_drv_buf->rx_rscn_set) { p_buf->p_val = &p_drv_buf->rx_rscn; return sizeof(p_drv_buf->rx_rscn); } break; case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1: case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2: case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3: case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4: idx = p_tlv->tlv_type - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1; if (p_drv_buf->rx_rscn_nport_set[idx]) { p_buf->p_val = &p_drv_buf->rx_rscn_nport[idx]; return sizeof(p_drv_buf->rx_rscn_nport[idx]); } break; case DRV_TLV_LUN_RESETS_ISSUED: if (p_drv_buf->tx_lun_rst_set) { p_buf->p_val = &p_drv_buf->tx_lun_rst; return sizeof(p_drv_buf->tx_lun_rst); } break; case DRV_TLV_ABORT_TASK_SETS_ISSUED: if (p_drv_buf->abort_task_sets_set) { p_buf->p_val = &p_drv_buf->abort_task_sets; return sizeof(p_drv_buf->abort_task_sets); } break; case DRV_TLV_TPRLOS_SENT: if (p_drv_buf->tx_tprlos_set) { p_buf->p_val = &p_drv_buf->tx_tprlos; return sizeof(p_drv_buf->tx_tprlos); } break; case DRV_TLV_NOS_SENT_COUNT: if (p_drv_buf->tx_nos_set) { p_buf->p_val = &p_drv_buf->tx_nos; return sizeof(p_drv_buf->tx_nos); } break; case DRV_TLV_NOS_RECEIVED_COUNT: if (p_drv_buf->rx_nos_set) { p_buf->p_val = &p_drv_buf->rx_nos; return sizeof(p_drv_buf->rx_nos); } break; case DRV_TLV_OLS_COUNT: if (p_drv_buf->ols_set) { p_buf->p_val = &p_drv_buf->ols; return sizeof(p_drv_buf->ols); } break; case DRV_TLV_LR_COUNT: if (p_drv_buf->lr_set) { p_buf->p_val = &p_drv_buf->lr; return sizeof(p_drv_buf->lr); } break; case DRV_TLV_LRR_COUNT: if (p_drv_buf->lrr_set) { p_buf->p_val = &p_drv_buf->lrr; return sizeof(p_drv_buf->lrr); } break; case DRV_TLV_LIP_SENT_COUNT: if (p_drv_buf->tx_lip_set) { p_buf->p_val = &p_drv_buf->tx_lip; return sizeof(p_drv_buf->tx_lip); } break; case DRV_TLV_LIP_RECEIVED_COUNT: if (p_drv_buf->rx_lip_set) { p_buf->p_val = &p_drv_buf->rx_lip; return sizeof(p_drv_buf->rx_lip); } break; case DRV_TLV_EOFA_COUNT: if (p_drv_buf->eofa_set) { p_buf->p_val = &p_drv_buf->eofa; return sizeof(p_drv_buf->eofa); } break; case DRV_TLV_EOFNI_COUNT: if (p_drv_buf->eofni_set) { p_buf->p_val = &p_drv_buf->eofni; return sizeof(p_drv_buf->eofni); } break; case 
DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT: if (p_drv_buf->scsi_chks_set) { p_buf->p_val = &p_drv_buf->scsi_chks; return sizeof(p_drv_buf->scsi_chks); } break; case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT: if (p_drv_buf->scsi_cond_met_set) { p_buf->p_val = &p_drv_buf->scsi_cond_met; return sizeof(p_drv_buf->scsi_cond_met); } break; case DRV_TLV_SCSI_STATUS_BUSY_COUNT: if (p_drv_buf->scsi_busy_set) { p_buf->p_val = &p_drv_buf->scsi_busy; return sizeof(p_drv_buf->scsi_busy); } break; case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT: if (p_drv_buf->scsi_inter_set) { p_buf->p_val = &p_drv_buf->scsi_inter; return sizeof(p_drv_buf->scsi_inter); } break; case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT: if (p_drv_buf->scsi_inter_cond_met_set) { p_buf->p_val = &p_drv_buf->scsi_inter_cond_met; return sizeof(p_drv_buf->scsi_inter_cond_met); } break; case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT: if (p_drv_buf->scsi_rsv_conflicts_set) { p_buf->p_val = &p_drv_buf->scsi_rsv_conflicts; return sizeof(p_drv_buf->scsi_rsv_conflicts); } break; case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT: if (p_drv_buf->scsi_tsk_full_set) { p_buf->p_val = &p_drv_buf->scsi_tsk_full; return sizeof(p_drv_buf->scsi_tsk_full); } break; case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT: if (p_drv_buf->scsi_aca_active_set) { p_buf->p_val = &p_drv_buf->scsi_aca_active; return sizeof(p_drv_buf->scsi_aca_active); } break; case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT: if (p_drv_buf->scsi_tsk_abort_set) { p_buf->p_val = &p_drv_buf->scsi_tsk_abort; return sizeof(p_drv_buf->scsi_tsk_abort); } break; case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ: case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ: case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ: case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ: case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ: idx = (p_tlv->tlv_type - DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ) / 2; if (p_drv_buf->scsi_rx_chk_set[idx]) { p_buf->p_val = &p_drv_buf->scsi_rx_chk[idx]; return sizeof(p_drv_buf->scsi_rx_chk[idx]); } break; case DRV_TLV_SCSI_CHECK_1_TIMESTAMP: case DRV_TLV_SCSI_CHECK_2_TIMESTAMP: case DRV_TLV_SCSI_CHECK_3_TIMESTAMP: case DRV_TLV_SCSI_CHECK_4_TIMESTAMP: case DRV_TLV_SCSI_CHECK_5_TIMESTAMP: idx = (p_tlv->tlv_type - DRV_TLV_SCSI_CHECK_1_TIMESTAMP) / 2; p_time = &p_drv_buf->scsi_chk_tstamp[idx]; return qed_mfw_get_tlv_time_value(p_time, p_buf); default: break; } return -1; } static int qed_mfw_get_iscsi_tlv_value(struct qed_drv_tlv_hdr *p_tlv, struct qed_mfw_tlv_iscsi *p_drv_buf, struct qed_tlv_parsed_buf *p_buf) { switch (p_tlv->tlv_type) { case DRV_TLV_TARGET_LLMNR_ENABLED: if (p_drv_buf->target_llmnr_set) { p_buf->p_val = &p_drv_buf->target_llmnr; return sizeof(p_drv_buf->target_llmnr); } break; case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED: if (p_drv_buf->header_digest_set) { p_buf->p_val = &p_drv_buf->header_digest; return sizeof(p_drv_buf->header_digest); } break; case DRV_TLV_DATA_DIGEST_FLAG_ENABLED: if (p_drv_buf->data_digest_set) { p_buf->p_val = &p_drv_buf->data_digest; return sizeof(p_drv_buf->data_digest); } break; case DRV_TLV_AUTHENTICATION_METHOD: if (p_drv_buf->auth_method_set) { p_buf->p_val = &p_drv_buf->auth_method; return sizeof(p_drv_buf->auth_method); } break; case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL: if (p_drv_buf->boot_taget_portal_set) { p_buf->p_val = &p_drv_buf->boot_taget_portal; return sizeof(p_drv_buf->boot_taget_portal); } break; case DRV_TLV_MAX_FRAME_SIZE: if (p_drv_buf->frame_size_set) { p_buf->p_val = 
&p_drv_buf->frame_size; return sizeof(p_drv_buf->frame_size); } break; case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE: if (p_drv_buf->tx_desc_size_set) { p_buf->p_val = &p_drv_buf->tx_desc_size; return sizeof(p_drv_buf->tx_desc_size); } break; case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE: if (p_drv_buf->rx_desc_size_set) { p_buf->p_val = &p_drv_buf->rx_desc_size; return sizeof(p_drv_buf->rx_desc_size); } break; case DRV_TLV_ISCSI_BOOT_PROGRESS: if (p_drv_buf->boot_progress_set) { p_buf->p_val = &p_drv_buf->boot_progress; return sizeof(p_drv_buf->boot_progress); } break; case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: if (p_drv_buf->tx_desc_qdepth_set) { p_buf->p_val = &p_drv_buf->tx_desc_qdepth; return sizeof(p_drv_buf->tx_desc_qdepth); } break; case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: if (p_drv_buf->rx_desc_qdepth_set) { p_buf->p_val = &p_drv_buf->rx_desc_qdepth; return sizeof(p_drv_buf->rx_desc_qdepth); } break; case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED: if (p_drv_buf->rx_frames_set) { p_buf->p_val = &p_drv_buf->rx_frames; return sizeof(p_drv_buf->rx_frames); } break; case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED: if (p_drv_buf->rx_bytes_set) { p_buf->p_val = &p_drv_buf->rx_bytes; return sizeof(p_drv_buf->rx_bytes); } break; case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT: if (p_drv_buf->tx_frames_set) { p_buf->p_val = &p_drv_buf->tx_frames; return sizeof(p_drv_buf->tx_frames); } break; case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT: if (p_drv_buf->tx_bytes_set) { p_buf->p_val = &p_drv_buf->tx_bytes; return sizeof(p_drv_buf->tx_bytes); } break; default: break; } return -1; } static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn, u8 tlv_group, u8 *p_mfw_buf, u32 size) { union qed_mfw_tlv_data *p_tlv_data; struct qed_tlv_parsed_buf buffer; struct qed_drv_tlv_hdr tlv; int len = 0; u32 offset; u8 *p_tlv; p_tlv_data = vzalloc(sizeof(*p_tlv_data)); if (!p_tlv_data) return -ENOMEM; if (qed_mfw_fill_tlv_data(p_hwfn, tlv_group, p_tlv_data)) { vfree(p_tlv_data); return -EINVAL; } memset(&tlv, 0, sizeof(tlv)); for (offset = 0; offset < size; offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) { p_tlv = &p_mfw_buf[offset]; tlv.tlv_type = TLV_TYPE(p_tlv); tlv.tlv_length = TLV_LENGTH(p_tlv); tlv.tlv_flags = TLV_FLAGS(p_tlv); DP_VERBOSE(p_hwfn, QED_MSG_SP, "Type %d length = %d flags = 0x%x\n", tlv.tlv_type, tlv.tlv_length, tlv.tlv_flags); if (tlv_group == QED_MFW_TLV_GENERIC) len = qed_mfw_get_gen_tlv_value(&tlv, &p_tlv_data->generic, &buffer); else if (tlv_group == QED_MFW_TLV_ETH) len = qed_mfw_get_eth_tlv_value(&tlv, &p_tlv_data->eth, &buffer); else if (tlv_group == QED_MFW_TLV_FCOE) len = qed_mfw_get_fcoe_tlv_value(&tlv, &p_tlv_data->fcoe, &buffer); else len = qed_mfw_get_iscsi_tlv_value(&tlv, &p_tlv_data->iscsi, &buffer); if (len > 0) { WARN(len > 4 * tlv.tlv_length, "Incorrect MFW TLV length %d, it shouldn't be greater than %d\n", len, 4 * tlv.tlv_length); len = min_t(int, len, 4 * tlv.tlv_length); tlv.tlv_flags |= QED_DRV_TLV_FLAGS_CHANGED; TLV_FLAGS(p_tlv) = tlv.tlv_flags; memcpy(p_mfw_buf + offset + sizeof(tlv), buffer.p_val, len); } } vfree(p_tlv_data); return 0; } int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 addr, size, offset, resp, param, val, global_offsize, global_addr; u8 tlv_group = 0, id, *p_mfw_buf = NULL, *p_temp; struct qed_drv_tlv_hdr tlv; int rc; addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, PUBLIC_GLOBAL); global_offsize = qed_rd(p_hwfn, p_ptt, addr); global_addr = SECTION_ADDR(global_offsize, 0); addr = global_addr + offsetof(struct public_global, 
data_ptr); addr = qed_rd(p_hwfn, p_ptt, addr); size = qed_rd(p_hwfn, p_ptt, global_addr + offsetof(struct public_global, data_size)); if (!size) { DP_NOTICE(p_hwfn, "Invalid TLV req size = %d\n", size); goto drv_done; } p_mfw_buf = vzalloc(size); if (!p_mfw_buf) { DP_NOTICE(p_hwfn, "Failed allocate memory for p_mfw_buf\n"); goto drv_done; } /* Read the TLV request to local buffer. MFW represents the TLV in * little endian format and mcp returns it bigendian format. Hence * driver need to convert data to little endian first and then do the * memcpy (casting) to preserve the MFW TLV format in the driver buffer. * */ for (offset = 0; offset < size; offset += sizeof(u32)) { val = qed_rd(p_hwfn, p_ptt, addr + offset); val = be32_to_cpu((__force __be32)val); memcpy(&p_mfw_buf[offset], &val, sizeof(u32)); } /* Parse the headers to enumerate the requested TLV groups */ for (offset = 0; offset < size; offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) { p_temp = &p_mfw_buf[offset]; tlv.tlv_type = TLV_TYPE(p_temp); tlv.tlv_length = TLV_LENGTH(p_temp); if (qed_mfw_get_tlv_group(tlv.tlv_type, &tlv_group)) DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "Un recognized TLV %d\n", tlv.tlv_type); } /* Sanitize the TLV groups according to personality */ if ((tlv_group & QED_MFW_TLV_ETH) && !QED_IS_L2_PERSONALITY(p_hwfn)) { DP_VERBOSE(p_hwfn, QED_MSG_SP, "Skipping L2 TLVs for non-L2 function\n"); tlv_group &= ~QED_MFW_TLV_ETH; } if ((tlv_group & QED_MFW_TLV_FCOE) && p_hwfn->hw_info.personality != QED_PCI_FCOE) { DP_VERBOSE(p_hwfn, QED_MSG_SP, "Skipping FCoE TLVs for non-FCoE function\n"); tlv_group &= ~QED_MFW_TLV_FCOE; } if ((tlv_group & QED_MFW_TLV_ISCSI) && p_hwfn->hw_info.personality != QED_PCI_ISCSI && p_hwfn->hw_info.personality != QED_PCI_NVMETCP) { DP_VERBOSE(p_hwfn, QED_MSG_SP, "Skipping iSCSI TLVs for non-iSCSI function\n"); tlv_group &= ~QED_MFW_TLV_ISCSI; } /* Update the TLV values in the local buffer */ for (id = QED_MFW_TLV_GENERIC; id < QED_MFW_TLV_MAX; id <<= 1) { if (tlv_group & id) if (qed_mfw_update_tlvs(p_hwfn, id, p_mfw_buf, size)) goto drv_done; } /* Write the TLV data to shared memory. The stream of 4 bytes first need * to be mem-copied to u32 element to make it as LSB format. And then * converted to big endian as required by mcp-write. */ for (offset = 0; offset < size; offset += sizeof(u32)) { memcpy(&val, &p_mfw_buf[offset], sizeof(u32)); val = (__force u32)cpu_to_be32(val); qed_wr(p_hwfn, p_ptt, addr + offset, val); } drv_done: rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_TLV_DONE, 0, &resp, &param); vfree(p_mfw_buf); return rc; }
linux-master
drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
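The record above (qed_mng_tlv.c) treats the management-firmware TLV request as a stream of 4-byte headers, with the type at byte 0, the length in 32-bit words at byte 1, and the flags at byte 3 (matching the TLV_TYPE/TLV_LENGTH/TLV_FLAGS macros), each header followed by length * 4 bytes of value; the parse loops advance by sizeof(tlv) + sizeof(u32) * tlv.tlv_length. The standalone sketch below is an illustration only, not driver code: it assumes that same byte layout, runs in user space, and uses made-up names (demo_tlv_hdr, demo_walk_tlvs).

/* Minimal user-space sketch of the TLV walk used in qed_mng_tlv.c.
 * Assumption: 4-byte header {type, length-in-dwords, reserved, flags}
 * followed by length * 4 bytes of value. Names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_tlv_hdr {
	uint8_t type;    /* TLV_TYPE(p)   -> p[0] */
	uint8_t length;  /* TLV_LENGTH(p) -> p[1], in 32-bit words */
	uint8_t rsvd;
	uint8_t flags;   /* TLV_FLAGS(p)  -> p[3] */
};

static void demo_walk_tlvs(const uint8_t *buf, uint32_t size)
{
	uint32_t offset = 0;

	while (offset + sizeof(struct demo_tlv_hdr) <= size) {
		struct demo_tlv_hdr hdr;

		memcpy(&hdr, buf + offset, sizeof(hdr));
		printf("type %u, length %u dwords, flags 0x%x\n",
		       (unsigned)hdr.type, (unsigned)hdr.length,
		       (unsigned)hdr.flags);

		/* Advance past header plus value area, mirroring the
		 * driver's offset += sizeof(tlv) + sizeof(u32) * length.
		 */
		offset += sizeof(hdr) + 4u * hdr.length;
	}
}

int main(void)
{
	/* One fabricated TLV: type 1, two dwords of value, flags 0. */
	uint8_t buf[12] = { 1, 2, 0, 0,
			    0xaa, 0xbb, 0xcc, 0xdd,
			    0x11, 0x22, 0x33, 0x44 };

	demo_walk_tlvs(buf, sizeof(buf));
	return 0;
}

The driver additionally byte-swaps each 32-bit word with be32_to_cpu()/cpu_to_be32() when copying between the MCP shared memory and its local buffer; the sketch skips that step since it operates on a local array.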
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic qlcnic NIC Driver * Copyright (c) 2009-2013 QLogic Corporation */ #include "qlcnic.h" static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = { {QLCNIC_CMD_CREATE_RX_CTX, 4, 1}, {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1}, {QLCNIC_CMD_CREATE_TX_CTX, 4, 1}, {QLCNIC_CMD_DESTROY_TX_CTX, 3, 1}, {QLCNIC_CMD_INTRPT_TEST, 4, 1}, {QLCNIC_CMD_SET_MTU, 4, 1}, {QLCNIC_CMD_READ_PHY, 4, 2}, {QLCNIC_CMD_WRITE_PHY, 5, 1}, {QLCNIC_CMD_READ_HW_REG, 4, 1}, {QLCNIC_CMD_GET_FLOW_CTL, 4, 2}, {QLCNIC_CMD_SET_FLOW_CTL, 4, 1}, {QLCNIC_CMD_READ_MAX_MTU, 4, 2}, {QLCNIC_CMD_READ_MAX_LRO, 4, 2}, {QLCNIC_CMD_MAC_ADDRESS, 4, 3}, {QLCNIC_CMD_GET_PCI_INFO, 4, 1}, {QLCNIC_CMD_GET_NIC_INFO, 4, 1}, {QLCNIC_CMD_SET_NIC_INFO, 4, 1}, {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3}, {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1}, {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3}, {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1}, {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1}, {QLCNIC_CMD_GET_MAC_STATS, 4, 1}, {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3}, {QLCNIC_CMD_GET_ESWITCH_STATS, 4, 1}, {QLCNIC_CMD_CONFIG_PORT, 4, 1}, {QLCNIC_CMD_TEMP_SIZE, 4, 4}, {QLCNIC_CMD_GET_TEMP_HDR, 4, 1}, {QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1}, {QLCNIC_CMD_GET_LED_STATUS, 4, 2}, {QLCNIC_CMD_MQ_TX_CONFIG_INTR, 2, 3}, {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2}, {QLCNIC_CMD_DCB_QUERY_PARAM, 4, 1}, }; static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw) { return (ahw->pci_func & 0xff) | ((ahw->fw_hal_version & 0xff) << 8) | (0xcafe << 16); } /* Allocate mailbox registers */ int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx, struct qlcnic_adapter *adapter, u32 type) { int i, size; const struct qlcnic_mailbox_metadata *mbx_tbl; mbx_tbl = qlcnic_mbx_tbl; size = ARRAY_SIZE(qlcnic_mbx_tbl); for (i = 0; i < size; i++) { if (type == mbx_tbl[i].cmd) { mbx->req.num = mbx_tbl[i].in_args; mbx->rsp.num = mbx_tbl[i].out_args; mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32), GFP_ATOMIC); if (!mbx->req.arg) return -ENOMEM; mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32), GFP_ATOMIC); if (!mbx->rsp.arg) { kfree(mbx->req.arg); mbx->req.arg = NULL; return -ENOMEM; } mbx->req.arg[0] = type; break; } } return 0; } /* Free up mailbox registers */ void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd) { kfree(cmd->req.arg); cmd->req.arg = NULL; kfree(cmd->rsp.arg); cmd->rsp.arg = NULL; } static u32 qlcnic_poll_rsp(struct qlcnic_adapter *adapter) { u32 rsp; int timeout = 0, err = 0; do { /* give atleast 1ms for firmware to respond */ mdelay(1); if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT) return QLCNIC_CDRP_RSP_TIMEOUT; rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET, &err); } while (!QLCNIC_CDRP_IS_RSP(rsp)); return rsp; } int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { int i, err = 0; u32 rsp; u32 signature; struct pci_dev *pdev = adapter->pdev; struct qlcnic_hardware_context *ahw = adapter->ahw; const char *fmt; signature = qlcnic_get_cmd_signature(ahw); /* Acquire semaphore before accessing CRB */ if (qlcnic_api_lock(adapter)) { cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT; return cmd->rsp.arg[0]; } QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature); for (i = 1; i < cmd->req.num; i++) QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]); QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0])); rsp = qlcnic_poll_rsp(adapter); if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) { dev_err(&pdev->dev, "command timeout, response = 0x%x\n", rsp); cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT; } else if (rsp == 
QLCNIC_CDRP_RSP_FAIL) { cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1), &err); switch (cmd->rsp.arg[0]) { case QLCNIC_RCODE_INVALID_ARGS: fmt = "CDRP invalid args: [%d]\n"; break; case QLCNIC_RCODE_NOT_SUPPORTED: case QLCNIC_RCODE_NOT_IMPL: fmt = "CDRP command not supported: [%d]\n"; break; case QLCNIC_RCODE_NOT_PERMITTED: fmt = "CDRP requested action not permitted: [%d]\n"; break; case QLCNIC_RCODE_INVALID: fmt = "CDRP invalid or unknown cmd received: [%d]\n"; break; case QLCNIC_RCODE_TIMEOUT: fmt = "CDRP command timeout: [%d]\n"; break; default: fmt = "CDRP command failed: [%d]\n"; break; } dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]); qlcnic_dump_mbx(adapter, cmd); } else if (rsp == QLCNIC_CDRP_RSP_OK) cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS; for (i = 1; i < cmd->rsp.num; i++) cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i), &err); /* Release semaphore */ qlcnic_api_unlock(adapter); return cmd->rsp.arg[0]; } int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter, u32 fw_cmd) { struct qlcnic_cmd_args cmd; u32 arg1, arg2, arg3; char drv_string[12]; int err = 0; memset(drv_string, 0, sizeof(drv_string)); snprintf(drv_string, sizeof(drv_string), "%d"".""%d"".""%d", _QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR, _QLCNIC_LINUX_SUBVERSION); err = qlcnic_alloc_mbx_args(&cmd, adapter, fw_cmd); if (err) return err; memcpy(&arg1, drv_string, sizeof(u32)); memcpy(&arg2, drv_string + 4, sizeof(u32)); memcpy(&arg3, drv_string + 8, sizeof(u32)); cmd.req.arg[1] = arg1; cmd.req.arg[2] = arg2; cmd.req.arg[3] = arg3; err = qlcnic_issue_cmd(adapter, &cmd); if (err) { dev_info(&adapter->pdev->dev, "Failed to set driver version in firmware\n"); err = -EIO; } qlcnic_free_mbx_args(&cmd); return err; } int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu) { int err = 0; struct qlcnic_cmd_args cmd; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE) return err; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU); if (err) return err; cmd.req.arg[1] = recv_ctx->context_id; cmd.req.arg[2] = mtu; err = qlcnic_issue_cmd(adapter, &cmd); if (err) { dev_err(&adapter->pdev->dev, "Failed to set mtu\n"); err = -EIO; } qlcnic_free_mbx_args(&cmd); return err; } int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) { struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct qlcnic_hardware_context *ahw = adapter->ahw; dma_addr_t hostrq_phys_addr, cardrsp_phys_addr; struct net_device *netdev = adapter->netdev; u32 temp_intr_crb_mode, temp_rds_crb_mode; struct qlcnic_cardrsp_rds_ring *prsp_rds; struct qlcnic_cardrsp_sds_ring *prsp_sds; struct qlcnic_hostrq_rds_ring *prq_rds; struct qlcnic_hostrq_sds_ring *prq_sds; struct qlcnic_host_rds_ring *rds_ring; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_cardrsp_rx_ctx *prsp; struct qlcnic_hostrq_rx_ctx *prq; u8 i, nrds_rings, nsds_rings; struct qlcnic_cmd_args cmd; size_t rq_size, rsp_size; u32 cap, reg, val, reg2; u64 phys_addr; u16 temp_u16; void *addr; int err; nrds_rings = adapter->max_rds_rings; nsds_rings = adapter->drv_sds_rings; rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings, nsds_rings); rsp_size = SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings, nsds_rings); addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, &hostrq_phys_addr, GFP_KERNEL); if (addr == NULL) return -ENOMEM; prq = addr; addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, &cardrsp_phys_addr, GFP_KERNEL); if (addr == NULL) { err = -ENOMEM; goto 
out_free_rq; } prsp = addr; prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr); cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN | QLCNIC_CAP0_VALIDOFF); cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS); if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) { cap |= QLCNIC_CAP0_TX_MULTI; } else { temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler); prq->valid_field_offset = cpu_to_le16(temp_u16); prq->txrx_sds_binding = nsds_rings - 1; temp_intr_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED; prq->host_int_crb_mode = cpu_to_le32(temp_intr_crb_mode); temp_rds_crb_mode = QLCNIC_HOST_RDS_CRB_MODE_UNIQUE; prq->host_rds_crb_mode = cpu_to_le32(temp_rds_crb_mode); } prq->capabilities[0] = cpu_to_le32(cap); prq->num_rds_rings = cpu_to_le16(nrds_rings); prq->num_sds_rings = cpu_to_le16(nsds_rings); prq->rds_ring_offset = 0; val = le32_to_cpu(prq->rds_ring_offset) + (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings); prq->sds_ring_offset = cpu_to_le32(val); prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data + le32_to_cpu(prq->rds_ring_offset)); for (i = 0; i < nrds_rings; i++) { rds_ring = &recv_ctx->rds_rings[i]; rds_ring->producer = 0; prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr); prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc); prq_rds[i].ring_kind = cpu_to_le32(i); prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size); } prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data + le32_to_cpu(prq->sds_ring_offset)); for (i = 0; i < nsds_rings; i++) { sds_ring = &recv_ctx->sds_rings[i]; sds_ring->consumer = 0; memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring)); prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr); prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) prq_sds[i].msi_index = cpu_to_le16(ahw->intr_tbl[i].id); else prq_sds[i].msi_index = cpu_to_le16(i); } phys_addr = hostrq_phys_addr; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX); if (err) goto out_free_rsp; cmd.req.arg[1] = MSD(phys_addr); cmd.req.arg[2] = LSD(phys_addr); cmd.req.arg[3] = rq_size; err = qlcnic_issue_cmd(adapter, &cmd); if (err) { dev_err(&adapter->pdev->dev, "Failed to create rx ctx in firmware%d\n", err); goto out_free_rsp; } prsp_rds = ((struct qlcnic_cardrsp_rds_ring *) &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]); for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) { rds_ring = &recv_ctx->rds_rings[i]; reg = le32_to_cpu(prsp_rds[i].host_producer_crb); rds_ring->crb_rcv_producer = ahw->pci_base0 + reg; } prsp_sds = ((struct qlcnic_cardrsp_sds_ring *) &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]); for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) { sds_ring = &recv_ctx->sds_rings[i]; reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) reg2 = ahw->intr_tbl[i].src; else reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb); sds_ring->crb_intr_mask = ahw->pci_base0 + reg2; sds_ring->crb_sts_consumer = ahw->pci_base0 + reg; } recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); recv_ctx->context_id = le16_to_cpu(prsp->context_id); recv_ctx->virt_port = prsp->virt_port; netdev_info(netdev, "Rx Context[%d] Created, state 0x%x\n", recv_ctx->context_id, recv_ctx->state); qlcnic_free_mbx_args(&cmd); out_free_rsp: dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp, cardrsp_phys_addr); out_free_rq: dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr); return 
err; } void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter) { int err; struct qlcnic_cmd_args cmd; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX); if (err) return; cmd.req.arg[1] = recv_ctx->context_id; err = qlcnic_issue_cmd(adapter, &cmd); if (err) dev_err(&adapter->pdev->dev, "Failed to destroy rx ctx in firmware\n"); recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED; qlcnic_free_mbx_args(&cmd); } int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, struct qlcnic_host_tx_ring *tx_ring, int ring) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct net_device *netdev = adapter->netdev; struct qlcnic_hostrq_tx_ctx *prq; struct qlcnic_hostrq_cds_ring *prq_cds; struct qlcnic_cardrsp_tx_ctx *prsp; struct qlcnic_cmd_args cmd; u32 temp, intr_mask, temp_int_crb_mode; dma_addr_t rq_phys_addr, rsp_phys_addr; int temp_nsds_rings, index, err; void *rq_addr, *rsp_addr; size_t rq_size, rsp_size; u64 phys_addr; u16 msix_id; /* reset host resources */ tx_ring->producer = 0; tx_ring->sw_consumer = 0; *(tx_ring->hw_consumer) = 0; rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, &rq_phys_addr, GFP_KERNEL); if (!rq_addr) return -ENOMEM; rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, &rsp_phys_addr, GFP_KERNEL); if (!rsp_addr) { err = -ENOMEM; goto out_free_rq; } prq = rq_addr; prsp = rsp_addr; prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr); temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN | QLCNIC_CAP0_LSO); if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) temp |= QLCNIC_CAP0_TX_MULTI; prq->capabilities[0] = cpu_to_le32(temp); if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) { temp_nsds_rings = adapter->drv_sds_rings; index = temp_nsds_rings + ring; msix_id = ahw->intr_tbl[index].id; prq->msi_index = cpu_to_le16(msix_id); } else { temp_int_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED; prq->host_int_crb_mode = cpu_to_le32(temp_int_crb_mode); prq->msi_index = 0; } prq->interrupt_ctl = 0; prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr); prq_cds = &prq->cds_ring; prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr); prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc); phys_addr = rq_phys_addr; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX); if (err) goto out_free_rsp; cmd.req.arg[1] = MSD(phys_addr); cmd.req.arg[2] = LSD(phys_addr); cmd.req.arg[3] = rq_size; err = qlcnic_issue_cmd(adapter, &cmd); if (err == QLCNIC_RCODE_SUCCESS) { tx_ring->state = le32_to_cpu(prsp->host_ctx_state); temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp; tx_ring->ctx_id = le16_to_cpu(prsp->context_id); if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test && (adapter->flags & QLCNIC_MSIX_ENABLED)) { index = adapter->drv_sds_rings + ring; intr_mask = ahw->intr_tbl[index].src; tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask; } netdev_info(netdev, "Tx Context[0x%x] Created, state 0x%x\n", tx_ring->ctx_id, tx_ring->state); } else { netdev_err(netdev, "Failed to create tx ctx in firmware%d\n", err); err = -EIO; } qlcnic_free_mbx_args(&cmd); out_free_rsp: dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr, rsp_phys_addr); out_free_rq: dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr); return err; } 
void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter, struct qlcnic_host_tx_ring *tx_ring) { struct qlcnic_cmd_args cmd; int ret; ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX); if (ret) return; cmd.req.arg[1] = tx_ring->ctx_id; if (qlcnic_issue_cmd(adapter, &cmd)) dev_err(&adapter->pdev->dev, "Failed to destroy tx ctx in firmware\n"); qlcnic_free_mbx_args(&cmd); } int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config) { int err; struct qlcnic_cmd_args cmd; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT); if (err) return err; cmd.req.arg[1] = config; err = qlcnic_issue_cmd(adapter, &cmd); qlcnic_free_mbx_args(&cmd); return err; } int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter) { void *addr; int err, ring; struct qlcnic_recv_context *recv_ctx; struct qlcnic_host_rds_ring *rds_ring; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_host_tx_ring *tx_ring; __le32 *ptr; struct pci_dev *pdev = adapter->pdev; recv_ctx = adapter->recv_ctx; for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL); if (ptr == NULL) { err = -ENOMEM; goto err_out_free; } tx_ring->hw_consumer = ptr; /* cmd desc ring */ addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring), &tx_ring->phys_addr, GFP_KERNEL); if (addr == NULL) { err = -ENOMEM; goto err_out_free; } tx_ring->desc_head = addr; } for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; addr = dma_alloc_coherent(&adapter->pdev->dev, RCV_DESC_RINGSIZE(rds_ring), &rds_ring->phys_addr, GFP_KERNEL); if (addr == NULL) { err = -ENOMEM; goto err_out_free; } rds_ring->desc_head = addr; } for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; addr = dma_alloc_coherent(&adapter->pdev->dev, STATUS_DESC_RINGSIZE(sds_ring), &sds_ring->phys_addr, GFP_KERNEL); if (addr == NULL) { err = -ENOMEM; goto err_out_free; } sds_ring->desc_head = addr; } return 0; err_out_free: qlcnic_free_hw_resources(adapter); return err; } int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev) { int i, err, ring; if (dev->flags & QLCNIC_NEED_FLR) { err = pci_reset_function(dev->pdev); if (err) { dev_err(&dev->pdev->dev, "Adapter reset failed (%d). 
Please reboot\n", err); return err; } dev->flags &= ~QLCNIC_NEED_FLR; } if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) { if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) { err = qlcnic_83xx_config_intrpt(dev, 1); if (err) return err; } } if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) && qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) { err = qlcnic_82xx_mq_intrpt(dev, 1); if (err) return err; } err = qlcnic_fw_cmd_create_rx_ctx(dev); if (err) goto err_out; for (ring = 0; ring < dev->drv_tx_rings; ring++) { err = qlcnic_fw_cmd_create_tx_ctx(dev, &dev->tx_ring[ring], ring); if (err) { qlcnic_fw_cmd_del_rx_ctx(dev); if (ring == 0) goto err_out; for (i = 0; i < ring; i++) qlcnic_fw_cmd_del_tx_ctx(dev, &dev->tx_ring[i]); goto err_out; } } set_bit(__QLCNIC_FW_ATTACHED, &dev->state); return 0; err_out: if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) && qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) qlcnic_82xx_config_intrpt(dev, 0); if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) { if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) qlcnic_83xx_config_intrpt(dev, 0); } return err; } void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter) { int ring; if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) { qlcnic_fw_cmd_del_rx_ctx(adapter); for (ring = 0; ring < adapter->drv_tx_rings; ring++) qlcnic_fw_cmd_del_tx_ctx(adapter, &adapter->tx_ring[ring]); if (qlcnic_82xx_check(adapter) && (adapter->flags & QLCNIC_MSIX_ENABLED) && qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) qlcnic_82xx_config_intrpt(adapter, 0); if (qlcnic_83xx_check(adapter) && (adapter->flags & QLCNIC_MSIX_ENABLED)) { if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) qlcnic_83xx_config_intrpt(adapter, 0); } /* Allow dma queues to drain after context reset */ mdelay(20); } } void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter) { struct qlcnic_recv_context *recv_ctx; struct qlcnic_host_rds_ring *rds_ring; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_host_tx_ring *tx_ring; int ring; recv_ctx = adapter->recv_ctx; for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; if (tx_ring->hw_consumer != NULL) { dma_free_coherent(&adapter->pdev->dev, sizeof(u32), tx_ring->hw_consumer, tx_ring->hw_cons_phys_addr); tx_ring->hw_consumer = NULL; } if (tx_ring->desc_head != NULL) { dma_free_coherent(&adapter->pdev->dev, TX_DESC_RINGSIZE(tx_ring), tx_ring->desc_head, tx_ring->phys_addr); tx_ring->desc_head = NULL; } } for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; if (rds_ring->desc_head != NULL) { dma_free_coherent(&adapter->pdev->dev, RCV_DESC_RINGSIZE(rds_ring), rds_ring->desc_head, rds_ring->phys_addr); rds_ring->desc_head = NULL; } } for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; if (sds_ring->desc_head != NULL) { dma_free_coherent(&adapter->pdev->dev, STATUS_DESC_RINGSIZE(sds_ring), sds_ring->desc_head, sds_ring->phys_addr); sds_ring->desc_head = NULL; } } } int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct net_device *netdev = adapter->netdev; struct qlcnic_cmd_args cmd; u32 type, val; int i, err = 0; for (i = 0; i < ahw->num_msix; i++) { err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MQ_TX_CONFIG_INTR); if (err) return err; type = op_type ? 
QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL; val = type | (ahw->intr_tbl[i].type << 4); if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX) val |= (ahw->intr_tbl[i].id << 16); cmd.req.arg[1] = val; err = qlcnic_issue_cmd(adapter, &cmd); if (err) { netdev_err(netdev, "Failed to %s interrupts %d\n", op_type == QLCNIC_INTRPT_ADD ? "Add" : "Delete", err); qlcnic_free_mbx_args(&cmd); return err; } val = cmd.rsp.arg[1]; if (LSB(val)) { netdev_info(netdev, "failed to configure interrupt for %d\n", ahw->intr_tbl[i].id); continue; } if (op_type) { ahw->intr_tbl[i].id = MSW(val); ahw->intr_tbl[i].enabled = 1; ahw->intr_tbl[i].src = cmd.rsp.arg[2]; } else { ahw->intr_tbl[i].id = i; ahw->intr_tbl[i].enabled = 0; ahw->intr_tbl[i].src = 0; } qlcnic_free_mbx_args(&cmd); } return err; } int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac, u8 function) { int err, i; struct qlcnic_cmd_args cmd; u32 mac_low, mac_high; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS); if (err) return err; cmd.req.arg[1] = function | BIT_8; err = qlcnic_issue_cmd(adapter, &cmd); if (err == QLCNIC_RCODE_SUCCESS) { mac_low = cmd.rsp.arg[1]; mac_high = cmd.rsp.arg[2]; for (i = 0; i < 2; i++) mac[i] = (u8) (mac_high >> ((1 - i) * 8)); for (i = 2; i < 6; i++) mac[i] = (u8) (mac_low >> ((5 - i) * 8)); } else { dev_err(&adapter->pdev->dev, "Failed to get mac address%d\n", err); err = -EIO; } qlcnic_free_mbx_args(&cmd); return err; } /* Get info of a NIC partition */ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *npar_info, u8 func_id) { int err; dma_addr_t nic_dma_t; const struct qlcnic_info_le *nic_info; void *nic_info_addr; struct qlcnic_cmd_args cmd; size_t nic_size = sizeof(struct qlcnic_info_le); nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, &nic_dma_t, GFP_KERNEL); if (!nic_info_addr) return -ENOMEM; nic_info = nic_info_addr; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO); if (err) goto out_free_dma; cmd.req.arg[1] = MSD(nic_dma_t); cmd.req.arg[2] = LSD(nic_dma_t); cmd.req.arg[3] = (func_id << 16 | nic_size); err = qlcnic_issue_cmd(adapter, &cmd); if (err != QLCNIC_RCODE_SUCCESS) { dev_err(&adapter->pdev->dev, "Failed to get nic info%d\n", err); err = -EIO; } else { npar_info->pci_func = le16_to_cpu(nic_info->pci_func); npar_info->op_mode = le16_to_cpu(nic_info->op_mode); npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw); npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw); npar_info->phys_port = le16_to_cpu(nic_info->phys_port); npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode); npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques); npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques); npar_info->capabilities = le32_to_cpu(nic_info->capabilities); npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu); } qlcnic_free_mbx_args(&cmd); out_free_dma: dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr, nic_dma_t); return err; } /* Configure a NIC partition */ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic) { int err = -EIO; dma_addr_t nic_dma_t; void *nic_info_addr; struct qlcnic_cmd_args cmd; struct qlcnic_info_le *nic_info; size_t nic_size = sizeof(struct qlcnic_info_le); if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) return err; nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, &nic_dma_t, GFP_KERNEL); if (!nic_info_addr) return -ENOMEM; nic_info = nic_info_addr; nic_info->pci_func = cpu_to_le16(nic->pci_func); nic_info->op_mode = 
cpu_to_le16(nic->op_mode); nic_info->phys_port = cpu_to_le16(nic->phys_port); nic_info->switch_mode = cpu_to_le16(nic->switch_mode); nic_info->capabilities = cpu_to_le32(nic->capabilities); nic_info->max_mac_filters = nic->max_mac_filters; nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques); nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques); nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw); nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw); err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); if (err) goto out_free_dma; cmd.req.arg[1] = MSD(nic_dma_t); cmd.req.arg[2] = LSD(nic_dma_t); cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size); err = qlcnic_issue_cmd(adapter, &cmd); if (err != QLCNIC_RCODE_SUCCESS) { dev_err(&adapter->pdev->dev, "Failed to set nic info%d\n", err); err = -EIO; } qlcnic_free_mbx_args(&cmd); out_free_dma: dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr, nic_dma_t); return err; } /* Get PCI Info of a partition */ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter, struct qlcnic_pci_info *pci_info) { struct qlcnic_hardware_context *ahw = adapter->ahw; size_t npar_size = sizeof(struct qlcnic_pci_info_le); size_t pci_size = npar_size * ahw->max_vnic_func; u16 nic = 0, fcoe = 0, iscsi = 0; struct qlcnic_pci_info_le *npar; struct qlcnic_cmd_args cmd; dma_addr_t pci_info_dma_t; void *pci_info_addr; int err = 0, i; pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size, &pci_info_dma_t, GFP_KERNEL); if (!pci_info_addr) return -ENOMEM; npar = pci_info_addr; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO); if (err) goto out_free_dma; cmd.req.arg[1] = MSD(pci_info_dma_t); cmd.req.arg[2] = LSD(pci_info_dma_t); cmd.req.arg[3] = pci_size; err = qlcnic_issue_cmd(adapter, &cmd); ahw->total_nic_func = 0; if (err == QLCNIC_RCODE_SUCCESS) { for (i = 0; i < ahw->max_vnic_func; i++, npar++, pci_info++) { pci_info->id = le16_to_cpu(npar->id); pci_info->active = le16_to_cpu(npar->active); if (!pci_info->active) continue; pci_info->type = le16_to_cpu(npar->type); err = qlcnic_get_pci_func_type(adapter, pci_info->type, &nic, &fcoe, &iscsi); pci_info->default_port = le16_to_cpu(npar->default_port); pci_info->tx_min_bw = le16_to_cpu(npar->tx_min_bw); pci_info->tx_max_bw = le16_to_cpu(npar->tx_max_bw); memcpy(pci_info->mac, npar->mac, ETH_ALEN); } } else { dev_err(&adapter->pdev->dev, "Failed to get PCI Info%d\n", err); err = -EIO; } ahw->total_nic_func = nic; ahw->total_pci_func = nic + fcoe + iscsi; if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) { dev_err(&adapter->pdev->dev, "%s: Invalid function count: total nic func[%x], total pci func[%x]\n", __func__, ahw->total_nic_func, ahw->total_pci_func); err = -EIO; } qlcnic_free_mbx_args(&cmd); out_free_dma: dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr, pci_info_dma_t); return err; } /* Configure eSwitch for port mirroring */ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id, u8 enable_mirroring, u8 pci_func) { struct device *dev = &adapter->pdev->dev; struct qlcnic_cmd_args cmd; int err = -EIO; u32 arg1; if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC || !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) { dev_err(&adapter->pdev->dev, "%s: Not a management function\n", __func__); return err; } arg1 = id | (enable_mirroring ? 
BIT_4 : 0); arg1 |= pci_func << 8; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORTMIRRORING); if (err) return err; cmd.req.arg[1] = arg1; err = qlcnic_issue_cmd(adapter, &cmd); if (err != QLCNIC_RCODE_SUCCESS) dev_err(dev, "Failed to configure port mirroring for vNIC function %d on eSwitch %d\n", pci_func, id); else dev_info(dev, "Configured port mirroring for vNIC function %d on eSwitch %d\n", pci_func, id); qlcnic_free_mbx_args(&cmd); return err; } int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func, const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) { size_t stats_size = sizeof(struct qlcnic_esw_stats_le); struct qlcnic_esw_stats_le *stats; dma_addr_t stats_dma_t; void *stats_addr; u32 arg1; struct qlcnic_cmd_args cmd; int err; if (esw_stats == NULL) return -ENOMEM; if ((adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) && (func != adapter->ahw->pci_func)) { dev_err(&adapter->pdev->dev, "Not privilege to query stats for func=%d", func); return -EIO; } stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, &stats_dma_t, GFP_KERNEL); if (!stats_addr) return -ENOMEM; arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12; arg1 |= rx_tx << 15 | stats_size << 16; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS); if (err) goto out_free_dma; cmd.req.arg[1] = arg1; cmd.req.arg[2] = MSD(stats_dma_t); cmd.req.arg[3] = LSD(stats_dma_t); err = qlcnic_issue_cmd(adapter, &cmd); if (!err) { stats = stats_addr; esw_stats->context_id = le16_to_cpu(stats->context_id); esw_stats->version = le16_to_cpu(stats->version); esw_stats->size = le16_to_cpu(stats->size); esw_stats->multicast_frames = le64_to_cpu(stats->multicast_frames); esw_stats->broadcast_frames = le64_to_cpu(stats->broadcast_frames); esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames); esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames); esw_stats->local_frames = le64_to_cpu(stats->local_frames); esw_stats->errors = le64_to_cpu(stats->errors); esw_stats->numbytes = le64_to_cpu(stats->numbytes); } qlcnic_free_mbx_args(&cmd); out_free_dma: dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, stats_dma_t); return err; } /* This routine will retrieve the MAC statistics from firmware */ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter, struct qlcnic_mac_statistics *mac_stats) { struct qlcnic_mac_statistics_le *stats; struct qlcnic_cmd_args cmd; size_t stats_size = sizeof(struct qlcnic_mac_statistics_le); dma_addr_t stats_dma_t; void *stats_addr; int err; if (mac_stats == NULL) return -ENOMEM; stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, &stats_dma_t, GFP_KERNEL); if (!stats_addr) return -ENOMEM; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS); if (err) goto out_free_dma; cmd.req.arg[1] = stats_size << 16; cmd.req.arg[2] = MSD(stats_dma_t); cmd.req.arg[3] = LSD(stats_dma_t); err = qlcnic_issue_cmd(adapter, &cmd); if (!err) { stats = stats_addr; mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames); mac_stats->mac_tx_bytes = le64_to_cpu(stats->mac_tx_bytes); mac_stats->mac_tx_mcast_pkts = le64_to_cpu(stats->mac_tx_mcast_pkts); mac_stats->mac_tx_bcast_pkts = le64_to_cpu(stats->mac_tx_bcast_pkts); mac_stats->mac_rx_frames = le64_to_cpu(stats->mac_rx_frames); mac_stats->mac_rx_bytes = le64_to_cpu(stats->mac_rx_bytes); mac_stats->mac_rx_mcast_pkts = le64_to_cpu(stats->mac_rx_mcast_pkts); mac_stats->mac_rx_length_error = le64_to_cpu(stats->mac_rx_length_error); mac_stats->mac_rx_length_small 
= le64_to_cpu(stats->mac_rx_length_small); mac_stats->mac_rx_length_large = le64_to_cpu(stats->mac_rx_length_large); mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber); mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped); mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error); } else { dev_err(&adapter->pdev->dev, "%s: Get mac stats failed, err=%d.\n", __func__, err); } qlcnic_free_mbx_args(&cmd); out_free_dma: dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, stats_dma_t); return err; } int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch, const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) { struct __qlcnic_esw_statistics port_stats; u8 i; int ret = -EIO; if (esw_stats == NULL) return -ENOMEM; if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) return -EIO; if (adapter->npars == NULL) return -EIO; memset(esw_stats, 0, sizeof(u64)); esw_stats->unicast_frames = QLCNIC_STATS_NOT_AVAIL; esw_stats->multicast_frames = QLCNIC_STATS_NOT_AVAIL; esw_stats->broadcast_frames = QLCNIC_STATS_NOT_AVAIL; esw_stats->dropped_frames = QLCNIC_STATS_NOT_AVAIL; esw_stats->errors = QLCNIC_STATS_NOT_AVAIL; esw_stats->local_frames = QLCNIC_STATS_NOT_AVAIL; esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL; esw_stats->context_id = eswitch; for (i = 0; i < adapter->ahw->total_nic_func; i++) { if (adapter->npars[i].phy_port != eswitch) continue; memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics)); if (qlcnic_get_port_stats(adapter, adapter->npars[i].pci_func, rx_tx, &port_stats)) continue; esw_stats->size = port_stats.size; esw_stats->version = port_stats.version; QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames, port_stats.unicast_frames); QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames, port_stats.multicast_frames); QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames, port_stats.broadcast_frames); QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames, port_stats.dropped_frames); QLCNIC_ADD_ESW_STATS(esw_stats->errors, port_stats.errors); QLCNIC_ADD_ESW_STATS(esw_stats->local_frames, port_stats.local_frames); QLCNIC_ADD_ESW_STATS(esw_stats->numbytes, port_stats.numbytes); ret = 0; } return ret; } int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw, const u8 port, const u8 rx_tx) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_cmd_args cmd; int err; u32 arg1; if (ahw->op_mode != QLCNIC_MGMT_FUNC) return -EIO; if (func_esw == QLCNIC_STATS_PORT) { if (port >= ahw->max_vnic_func) goto err_ret; } else if (func_esw == QLCNIC_STATS_ESWITCH) { if (port >= QLCNIC_NIU_MAX_XG_PORTS) goto err_ret; } else { goto err_ret; } if (rx_tx > QLCNIC_QUERY_TX_COUNTER) goto err_ret; arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12; arg1 |= BIT_14 | rx_tx << 15; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS); if (err) return err; cmd.req.arg[1] = arg1; err = qlcnic_issue_cmd(adapter, &cmd); qlcnic_free_mbx_args(&cmd); return err; err_ret: dev_err(&adapter->pdev->dev, "Invalid args func_esw %d port %d rx_ctx %d\n", func_esw, port, rx_tx); return -EIO; } static int __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, u32 *arg1, u32 *arg2) { struct device *dev = &adapter->pdev->dev; struct qlcnic_cmd_args cmd; u8 pci_func = *arg1 >> 8; int err; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG); if (err) return err; cmd.req.arg[1] = *arg1; err = qlcnic_issue_cmd(adapter, &cmd); *arg1 = cmd.rsp.arg[1]; *arg2 = cmd.rsp.arg[2]; qlcnic_free_mbx_args(&cmd); if 
(err == QLCNIC_RCODE_SUCCESS) dev_info(dev, "Get eSwitch port config for vNIC function %d\n", pci_func); else dev_err(dev, "Failed to get eswitch port config for vNIC function %d\n", pci_func); return err; } /* Configure eSwitch port op_mode = 0 for setting default port behavior op_mode = 1 for setting vlan id op_mode = 2 for deleting vlan id op_type = 0 for vlan_id op_type = 1 for port vlan_id */ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, struct qlcnic_esw_func_cfg *esw_cfg) { struct device *dev = &adapter->pdev->dev; struct qlcnic_cmd_args cmd; int err = -EIO, index; u32 arg1, arg2 = 0; u8 pci_func; if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) { dev_err(&adapter->pdev->dev, "%s: Not a management function\n", __func__); return err; } pci_func = esw_cfg->pci_func; index = qlcnic_is_valid_nic_func(adapter, pci_func); if (index < 0) return err; arg1 = (adapter->npars[index].phy_port & BIT_0); arg1 |= (pci_func << 8); if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2)) return err; arg1 &= ~(0x0ff << 8); arg1 |= (pci_func << 8); arg1 &= ~(BIT_2 | BIT_3); switch (esw_cfg->op_mode) { case QLCNIC_PORT_DEFAULTS: arg1 |= (BIT_4 | BIT_6 | BIT_7); arg2 |= (BIT_0 | BIT_1); if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) arg2 |= (BIT_2 | BIT_3); if (!(esw_cfg->discard_tagged)) arg1 &= ~BIT_4; if (!(esw_cfg->promisc_mode)) arg1 &= ~BIT_6; if (!(esw_cfg->mac_override)) arg1 &= ~BIT_7; if (!(esw_cfg->mac_anti_spoof)) arg2 &= ~BIT_0; if (!(esw_cfg->offload_flags & BIT_0)) arg2 &= ~(BIT_1 | BIT_2 | BIT_3); if (!(esw_cfg->offload_flags & BIT_1)) arg2 &= ~BIT_2; if (!(esw_cfg->offload_flags & BIT_2)) arg2 &= ~BIT_3; break; case QLCNIC_ADD_VLAN: arg1 &= ~(0x0ffff << 16); arg1 |= (BIT_2 | BIT_5); arg1 |= (esw_cfg->vlan_id << 16); break; case QLCNIC_DEL_VLAN: arg1 |= (BIT_3 | BIT_5); arg1 &= ~(0x0ffff << 16); break; default: dev_err(&adapter->pdev->dev, "%s: Invalid opmode 0x%x\n", __func__, esw_cfg->op_mode); return err; } err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_ESWITCH); if (err) return err; cmd.req.arg[1] = arg1; cmd.req.arg[2] = arg2; err = qlcnic_issue_cmd(adapter, &cmd); qlcnic_free_mbx_args(&cmd); if (err != QLCNIC_RCODE_SUCCESS) dev_err(dev, "Failed to configure eswitch for vNIC function %d\n", pci_func); else dev_info(dev, "Configured eSwitch for vNIC function %d\n", pci_func); return err; } int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, struct qlcnic_esw_func_cfg *esw_cfg) { u32 arg1, arg2; int index; u8 phy_port; if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) { index = qlcnic_is_valid_nic_func(adapter, esw_cfg->pci_func); if (index < 0) return -EIO; phy_port = adapter->npars[index].phy_port; } else { phy_port = adapter->ahw->physical_port; } arg1 = phy_port; arg1 |= (esw_cfg->pci_func << 8); if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2)) return -EIO; esw_cfg->discard_tagged = !!(arg1 & BIT_4); esw_cfg->host_vlan_tag = !!(arg1 & BIT_5); esw_cfg->promisc_mode = !!(arg1 & BIT_6); esw_cfg->mac_override = !!(arg1 & BIT_7); esw_cfg->vlan_id = LSW(arg1 >> 16); esw_cfg->mac_anti_spoof = (arg2 & 0x1); esw_cfg->offload_flags = ((arg2 >> 1) & 0x7); return 0; }
linux-master
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 */

#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/swab.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/log2.h>
#include <linux/pci.h>
#include <net/vxlan.h>

#include "qlcnic.h"
#include "qlcnic_sriov.h"
#include "qlcnic_hw.h"

MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);

char qlcnic_driver_name[] = "qlcnic";
static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
	"Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;

static int qlcnic_mac_learn;
module_param(qlcnic_mac_learn, int, 0444);
MODULE_PARM_DESC(qlcnic_mac_learn,
		 "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)");

int qlcnic_use_msi = 1;
MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
module_param_named(use_msi, qlcnic_use_msi, int, 0444);

int qlcnic_use_msi_x = 1;
MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444);

int qlcnic_auto_fw_reset = 1;
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644);

int qlcnic_load_fw_file;
MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file, 2=POST in fast mode, 3= POST in medium mode, 4=POST in slow mode)");
module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);

static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void qlcnic_remove(struct pci_dev *pdev);
static int qlcnic_open(struct net_device *netdev);
static int qlcnic_close(struct net_device *netdev);
static void qlcnic_tx_timeout(struct net_device *netdev, unsigned int txqueue);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
static irqreturn_t qlcnic_intr(int irq, void *data);
static irqreturn_t qlcnic_msi_intr(int irq, void *data);
static irqreturn_t qlcnic_msix_intr(int irq, void *data);
static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data);
static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
static int qlcnic_start_firmware(struct qlcnic_adapter *);
static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16);
static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16);
static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *);
static void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
static irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *);
static int qlcnic_82xx_start_firmware(struct qlcnic_adapter *);
static void qlcnic_82xx_io_resume(struct pci_dev *);
static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
static
pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *, pci_channel_state_t); static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE824X) return ahw->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX; else return 1; } /* PCI Device ID Table */ #define ENTRY(device) \ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \ .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} static const struct pci_device_id qlcnic_pci_tbl[] = { ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X), ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X), ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X), ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830), ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE8C30), ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X), ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X), {0,} }; MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl); inline void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *tx_ring) { writel(tx_ring->producer, tx_ring->crb_cmd_producer); } static const u32 msi_tgt_status[8] = { ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1, ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3, ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5, ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7 }; static const u32 qlcnic_reg_tbl[] = { 0x1B20A8, /* PEG_HALT_STAT1 */ 0x1B20AC, /* PEG_HALT_STAT2 */ 0x1B20B0, /* FW_HEARTBEAT */ 0x1B2100, /* LOCK ID */ 0x1B2128, /* FW_CAPABILITIES */ 0x1B2138, /* drv active */ 0x1B2140, /* dev state */ 0x1B2144, /* drv state */ 0x1B2148, /* drv scratch */ 0x1B214C, /* dev partition info */ 0x1B2174, /* drv idc ver */ 0x1B2150, /* fw version major */ 0x1B2154, /* fw version minor */ 0x1B2158, /* fw version sub */ 0x1B219C, /* npar state */ 0x1B21FC, /* FW_IMG_VALID */ 0x1B2250, /* CMD_PEG_STATE */ 0x1B233C, /* RCV_PEG_STATE */ 0x1B23B4, /* ASIC TEMP */ 0x1B216C, /* FW api */ 0x1B2170, /* drv op mode */ 0x13C010, /* flash lock */ 0x13C014, /* flash unlock */ }; static const struct qlcnic_board_info qlcnic_boards[] = { { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE844X, 0x0, 0x0, "8400 series 10GbE Converged Network Adapter (TCP/IP Networking)" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE834X, PCI_VENDOR_ID_QLOGIC, 0x24e, "8300 Series Dual Port 10GbE Converged Network Adapter " "(TCP/IP Networking)" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE834X, PCI_VENDOR_ID_QLOGIC, 0x243, "8300 Series Single Port 10GbE Converged Network Adapter " "(TCP/IP Networking)" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE834X, PCI_VENDOR_ID_QLOGIC, 0x24a, "8300 Series Dual Port 10GbE Converged Network Adapter " "(TCP/IP Networking)" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE834X, PCI_VENDOR_ID_QLOGIC, 0x246, "8300 Series Dual Port 10GbE Converged Network Adapter " "(TCP/IP Networking)" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE834X, PCI_VENDOR_ID_QLOGIC, 0x252, "8300 Series Dual Port 10GbE Converged Network Adapter " "(TCP/IP Networking)" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE834X, PCI_VENDOR_ID_QLOGIC, 0x26e, "8300 Series Dual Port 10GbE Converged Network Adapter " "(TCP/IP Networking)" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE834X, PCI_VENDOR_ID_QLOGIC, 0x260, "8300 Series Dual Port 10GbE Converged Network Adapter " "(TCP/IP Networking)" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE834X, PCI_VENDOR_ID_QLOGIC, 0x266, "8300 Series Single Port 10GbE Converged Network Adapter " "(TCP/IP Networking)" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE834X, PCI_VENDOR_ID_QLOGIC, 0x269, 
"8300 Series Dual Port 10GbE Converged Network Adapter " "(TCP/IP Networking)" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE834X, PCI_VENDOR_ID_QLOGIC, 0x271, "8300 Series Dual Port 10GbE Converged Network Adapter " "(TCP/IP Networking)" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE834X, 0x0, 0x0, "8300 Series 1/10GbE Controller" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE8830, 0x0, 0x0, "8830 Series 1/10GbE Controller" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE824X, PCI_VENDOR_ID_QLOGIC, 0x203, "8200 Series Single Port 10GbE Converged Network Adapter" "(TCP/IP Networking)" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE824X, PCI_VENDOR_ID_QLOGIC, 0x207, "8200 Series Dual Port 10GbE Converged Network Adapter" "(TCP/IP Networking)" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE824X, PCI_VENDOR_ID_QLOGIC, 0x20b, "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE824X, PCI_VENDOR_ID_QLOGIC, 0x20c, "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE824X, PCI_VENDOR_ID_QLOGIC, 0x20f, "3200 Series Single Port 10Gb Intelligent Ethernet Adapter" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE824X, 0x103c, 0x3733, "NC523SFP 10Gb 2-port Server Adapter" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE824X, 0x103c, 0x3346, "CN1000Q Dual Port Converged Network Adapter" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE824X, PCI_VENDOR_ID_QLOGIC, 0x210, "QME8242-k 10GbE Dual Port Mezzanine Card" }, { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE824X, 0x0, 0x0, "cLOM8214 1/10GbE Controller" }, }; #define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards) static const struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG; int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count) { int size = sizeof(struct qlcnic_host_sds_ring) * count; recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL); return recv_ctx->sds_rings == NULL; } void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx) { kfree(recv_ctx->sds_rings); recv_ctx->sds_rings = NULL; } int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; u8 mac_addr[ETH_ALEN]; int ret; ret = qlcnic_get_mac_address(adapter, mac_addr, adapter->ahw->pci_func); if (ret) return ret; eth_hw_addr_set(netdev, mac_addr); memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); /* set station address */ if (!is_valid_ether_addr(netdev->dev_addr)) dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr); return 0; } static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter) { struct qlcnic_mac_vlan_list *cur; list_for_each_entry(cur, &adapter->mac_list, list) { if (ether_addr_equal_unaligned(adapter->mac_addr, cur->mac_addr)) { qlcnic_sre_macaddr_change(adapter, cur->mac_addr, 0, QLCNIC_MAC_DEL); list_del(&cur->list); kfree(cur); return; } } } static int qlcnic_set_mac(struct net_device *netdev, void *p) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; if (qlcnic_sriov_vf_check(adapter)) return -EINVAL; if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED)) return -EOPNOTSUPP; if (!is_valid_ether_addr(addr->sa_data)) return -EINVAL; if (ether_addr_equal_unaligned(adapter->mac_addr, addr->sa_data) && ether_addr_equal_unaligned(netdev->dev_addr, addr->sa_data)) return 0; if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { 
netif_device_detach(netdev); qlcnic_napi_disable(adapter); } qlcnic_delete_adapter_mac(adapter); memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); eth_hw_addr_set(netdev, addr->sa_data); qlcnic_set_multi(adapter->netdev); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { netif_device_attach(netdev); qlcnic_napi_enable(adapter); } return 0; } static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *netdev, const unsigned char *addr, u16 vid, struct netlink_ext_ack *extack) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int err = -EOPNOTSUPP; if (!adapter->fdb_mac_learn) return ndo_dflt_fdb_del(ndm, tb, netdev, addr, vid); if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) || qlcnic_sriov_check(adapter)) { if (is_unicast_ether_addr(addr)) { err = dev_uc_del(netdev, addr); if (!err) err = qlcnic_nic_del_mac(adapter, addr); } else if (is_multicast_ether_addr(addr)) { err = dev_mc_del(netdev, addr); } else { err = -EINVAL; } } return err; } static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *netdev, const unsigned char *addr, u16 vid, u16 flags, struct netlink_ext_ack *extack) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int err = 0; if (!adapter->fdb_mac_learn) return ndo_dflt_fdb_add(ndm, tb, netdev, addr, vid, flags); if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) && !qlcnic_sriov_check(adapter)) { pr_info("%s: FDB e-switch is not enabled\n", __func__); return -EOPNOTSUPP; } if (ether_addr_equal(addr, adapter->mac_addr)) return err; if (is_unicast_ether_addr(addr)) { if (netdev_uc_count(netdev) < adapter->ahw->max_uc_count) err = dev_uc_add_excl(netdev, addr); else err = -ENOMEM; } else if (is_multicast_ether_addr(addr)) { err = dev_mc_add_excl(netdev, addr); } else { err = -EINVAL; } return err; } static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb, struct net_device *netdev, struct net_device *filter_dev, int *idx) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int err = 0; if (!adapter->fdb_mac_learn) return ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx); if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) || qlcnic_sriov_check(adapter)) err = ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx); return err; } static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter) { while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) usleep_range(10000, 11000); if (!adapter->fw_work.work.func) return; cancel_delayed_work_sync(&adapter->fw_work); } static int qlcnic_get_phys_port_id(struct net_device *netdev, struct netdev_phys_item_id *ppid) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_hardware_context *ahw = adapter->ahw; if (!(adapter->flags & QLCNIC_HAS_PHYS_PORT_ID)) return -EOPNOTSUPP; ppid->id_len = sizeof(ahw->phys_port_id); memcpy(ppid->id, ahw->phys_port_id, ppid->id_len); return 0; } static int qlcnic_udp_tunnel_sync(struct net_device *dev, unsigned int table) { struct qlcnic_adapter *adapter = netdev_priv(dev); struct udp_tunnel_info ti; int err; udp_tunnel_nic_get_port(dev, table, 0, &ti); if (ti.port) { err = qlcnic_set_vxlan_port(adapter, ntohs(ti.port)); if (err) return err; } return qlcnic_set_vxlan_parsing(adapter, ntohs(ti.port)); } static const struct udp_tunnel_nic_info qlcnic_udp_tunnels = { .sync_table = qlcnic_udp_tunnel_sync, .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP, .tables = { { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, }, }; static netdev_features_t qlcnic_features_check(struct sk_buff *skb, 
					       struct net_device *dev,
					       netdev_features_t features)
{
	features = vlan_features_check(skb, features);
	return vxlan_features_check(skb, features);
}

static const struct net_device_ops qlcnic_netdev_ops = {
	.ndo_open = qlcnic_open,
	.ndo_stop = qlcnic_close,
	.ndo_start_xmit = qlcnic_xmit_frame,
	.ndo_get_stats = qlcnic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = qlcnic_set_multi,
	.ndo_set_mac_address = qlcnic_set_mac,
	.ndo_change_mtu = qlcnic_change_mtu,
	.ndo_fix_features = qlcnic_fix_features,
	.ndo_set_features = qlcnic_set_features,
	.ndo_tx_timeout = qlcnic_tx_timeout,
	.ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
	.ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
	.ndo_fdb_add = qlcnic_fdb_add,
	.ndo_fdb_del = qlcnic_fdb_del,
	.ndo_fdb_dump = qlcnic_fdb_dump,
	.ndo_get_phys_port_id = qlcnic_get_phys_port_id,
	.ndo_features_check = qlcnic_features_check,
#ifdef CONFIG_QLCNIC_SRIOV
	.ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
	.ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate,
	.ndo_get_vf_config = qlcnic_sriov_get_vf_config,
	.ndo_set_vf_vlan = qlcnic_sriov_set_vf_vlan,
	.ndo_set_vf_spoofchk = qlcnic_sriov_set_vf_spoofchk,
#endif
};

static const struct net_device_ops qlcnic_netdev_failed_ops = {
	.ndo_open = qlcnic_open,
};

static struct qlcnic_nic_template qlcnic_ops = {
	.config_bridged_mode = qlcnic_config_bridged_mode,
	.config_led = qlcnic_82xx_config_led,
	.start_firmware = qlcnic_82xx_start_firmware,
	.request_reset = qlcnic_82xx_dev_request_reset,
	.cancel_idc_work = qlcnic_82xx_cancel_idc_work,
	.napi_add = qlcnic_82xx_napi_add,
	.napi_del = qlcnic_82xx_napi_del,
	.config_ipaddr = qlcnic_82xx_config_ipaddr,
	.shutdown = qlcnic_82xx_shutdown,
	.resume = qlcnic_82xx_resume,
	.clear_legacy_intr = qlcnic_82xx_clear_legacy_intr,
};

struct qlcnic_nic_template qlcnic_vf_ops = {
	.config_bridged_mode = qlcnicvf_config_bridged_mode,
	.config_led = qlcnicvf_config_led,
	.start_firmware = qlcnicvf_start_firmware
};

static struct qlcnic_hardware_ops qlcnic_hw_ops = {
	.read_crb = qlcnic_82xx_read_crb,
	.write_crb = qlcnic_82xx_write_crb,
	.read_reg = qlcnic_82xx_hw_read_wx_2M,
	.write_reg = qlcnic_82xx_hw_write_wx_2M,
	.get_mac_address = qlcnic_82xx_get_mac_address,
	.setup_intr = qlcnic_82xx_setup_intr,
	.alloc_mbx_args = qlcnic_82xx_alloc_mbx_args,
	.mbx_cmd = qlcnic_82xx_issue_cmd,
	.get_func_no = qlcnic_82xx_get_func_no,
	.api_lock = qlcnic_82xx_api_lock,
	.api_unlock = qlcnic_82xx_api_unlock,
	.add_sysfs = qlcnic_82xx_add_sysfs,
	.remove_sysfs = qlcnic_82xx_remove_sysfs,
	.process_lb_rcv_ring_diag = qlcnic_82xx_process_rcv_ring_diag,
	.create_rx_ctx = qlcnic_82xx_fw_cmd_create_rx_ctx,
	.create_tx_ctx = qlcnic_82xx_fw_cmd_create_tx_ctx,
	.del_rx_ctx = qlcnic_82xx_fw_cmd_del_rx_ctx,
	.del_tx_ctx = qlcnic_82xx_fw_cmd_del_tx_ctx,
	.setup_link_event = qlcnic_82xx_linkevent_request,
	.get_nic_info = qlcnic_82xx_get_nic_info,
	.get_pci_info = qlcnic_82xx_get_pci_info,
	.set_nic_info = qlcnic_82xx_set_nic_info,
	.change_macvlan = qlcnic_82xx_sre_macaddr_change,
	.napi_enable = qlcnic_82xx_napi_enable,
	.napi_disable = qlcnic_82xx_napi_disable,
	.config_intr_coal = qlcnic_82xx_config_intr_coalesce,
	.config_rss = qlcnic_82xx_config_rss,
	.config_hw_lro = qlcnic_82xx_config_hw_lro,
	.config_loopback = qlcnic_82xx_set_lb_mode,
	.clear_loopback = qlcnic_82xx_clear_lb_mode,
	.config_promisc_mode = qlcnic_82xx_nic_set_promisc,
	.change_l2_filter = qlcnic_82xx_change_filter,
	.get_board_info = qlcnic_82xx_get_board_info,
	.set_mac_filter_count = qlcnic_82xx_set_mac_filter_count,
	.free_mac_list = qlcnic_82xx_free_mac_list,
	.read_phys_port_id =
qlcnic_82xx_read_phys_port_id, .io_error_detected = qlcnic_82xx_io_error_detected, .io_slot_reset = qlcnic_82xx_io_slot_reset, .io_resume = qlcnic_82xx_io_resume, .get_beacon_state = qlcnic_82xx_get_beacon_state, .enable_sds_intr = qlcnic_82xx_enable_sds_intr, .disable_sds_intr = qlcnic_82xx_disable_sds_intr, .enable_tx_intr = qlcnic_82xx_enable_tx_intr, .disable_tx_intr = qlcnic_82xx_disable_tx_intr, .get_saved_state = qlcnic_82xx_get_saved_state, .set_saved_state = qlcnic_82xx_set_saved_state, .cache_tmpl_hdr_values = qlcnic_82xx_cache_tmpl_hdr_values, .get_cap_size = qlcnic_82xx_get_cap_size, .set_sys_info = qlcnic_82xx_set_sys_info, .store_cap_mask = qlcnic_82xx_store_cap_mask, .encap_rx_offload = qlcnic_82xx_encap_rx_offload, .encap_tx_offload = qlcnic_82xx_encap_tx_offload, }; static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; if (qlcnic_82xx_check(adapter) && (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_MULTI_TX)) { test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state); return 0; } else { return 1; } } static int qlcnic_max_rings(struct qlcnic_adapter *adapter, u8 ring_cnt, int queue_type) { int num_rings, max_rings = QLCNIC_MAX_SDS_RINGS; if (queue_type == QLCNIC_RX_QUEUE) max_rings = adapter->max_sds_rings; else if (queue_type == QLCNIC_TX_QUEUE) max_rings = adapter->max_tx_rings; num_rings = rounddown_pow_of_two(min_t(int, num_online_cpus(), max_rings)); if (ring_cnt > num_rings) return num_rings; else return ring_cnt; } void qlcnic_set_tx_ring_count(struct qlcnic_adapter *adapter, u8 tx_cnt) { /* 83xx adapter does not have max_tx_rings intialized in probe */ if (adapter->max_tx_rings) adapter->drv_tx_rings = qlcnic_max_rings(adapter, tx_cnt, QLCNIC_TX_QUEUE); else adapter->drv_tx_rings = tx_cnt; } void qlcnic_set_sds_ring_count(struct qlcnic_adapter *adapter, u8 rx_cnt) { /* 83xx adapter does not have max_sds_rings intialized in probe */ if (adapter->max_sds_rings) adapter->drv_sds_rings = qlcnic_max_rings(adapter, rx_cnt, QLCNIC_RX_QUEUE); else adapter->drv_sds_rings = rx_cnt; } int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; int num_msix = 0, err = 0, vector; adapter->flags &= ~QLCNIC_TSS_RSS; if (adapter->drv_tss_rings > 0) num_msix += adapter->drv_tss_rings; else num_msix += adapter->drv_tx_rings; if (adapter->drv_rss_rings > 0) num_msix += adapter->drv_rss_rings; else num_msix += adapter->drv_sds_rings; if (qlcnic_83xx_check(adapter)) num_msix += 1; if (!adapter->msix_entries) { adapter->msix_entries = kcalloc(num_msix, sizeof(struct msix_entry), GFP_KERNEL); if (!adapter->msix_entries) return -ENOMEM; } for (vector = 0; vector < num_msix; vector++) adapter->msix_entries[vector].entry = vector; restore: err = pci_enable_msix_exact(pdev, adapter->msix_entries, num_msix); if (err == -ENOSPC) { if (!adapter->drv_tss_rings && !adapter->drv_rss_rings) return err; netdev_info(adapter->netdev, "Unable to allocate %d MSI-X vectors, Available vectors %d\n", num_msix, err); num_msix = adapter->drv_tx_rings + adapter->drv_sds_rings; /* Set rings to 0 so we can restore original TSS/RSS count */ adapter->drv_tss_rings = 0; adapter->drv_rss_rings = 0; if (qlcnic_83xx_check(adapter)) num_msix += 1; netdev_info(adapter->netdev, "Restoring %d Tx, %d SDS rings for total %d vectors.\n", adapter->drv_tx_rings, adapter->drv_sds_rings, num_msix); goto restore; } else if (err < 0) { return err; } adapter->ahw->num_msix = num_msix; if 
(adapter->drv_tss_rings > 0) adapter->drv_tx_rings = adapter->drv_tss_rings; if (adapter->drv_rss_rings > 0) adapter->drv_sds_rings = adapter->drv_rss_rings; return 0; } int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) { struct pci_dev *pdev = adapter->pdev; int err, vector; if (!adapter->msix_entries) { adapter->msix_entries = kcalloc(num_msix, sizeof(struct msix_entry), GFP_KERNEL); if (!adapter->msix_entries) return -ENOMEM; } adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED); if (adapter->ahw->msix_supported) { enable_msix: for (vector = 0; vector < num_msix; vector++) adapter->msix_entries[vector].entry = vector; err = pci_enable_msix_range(pdev, adapter->msix_entries, 1, num_msix); if (err == num_msix) { adapter->flags |= QLCNIC_MSIX_ENABLED; adapter->ahw->num_msix = num_msix; dev_info(&pdev->dev, "using msi-x interrupts\n"); return 0; } else if (err > 0) { pci_disable_msix(pdev); dev_info(&pdev->dev, "Unable to allocate %d MSI-X vectors, Available vectors %d\n", num_msix, err); if (qlcnic_82xx_check(adapter)) { num_msix = rounddown_pow_of_two(err); if (err < QLCNIC_82XX_MINIMUM_VECTOR) return -ENOSPC; } else { num_msix = rounddown_pow_of_two(err - 1); num_msix += 1; if (err < QLCNIC_83XX_MINIMUM_VECTOR) return -ENOSPC; } if (qlcnic_82xx_check(adapter) && !qlcnic_check_multi_tx(adapter)) { adapter->drv_sds_rings = num_msix; adapter->drv_tx_rings = QLCNIC_SINGLE_RING; } else { /* Distribute vectors equally */ adapter->drv_tx_rings = num_msix / 2; adapter->drv_sds_rings = adapter->drv_tx_rings; } if (num_msix) { dev_info(&pdev->dev, "Trying to allocate %d MSI-X interrupt vectors\n", num_msix); goto enable_msix; } } else { dev_info(&pdev->dev, "Unable to allocate %d MSI-X vectors, err=%d\n", num_msix, err); return err; } } return -EIO; } static int qlcnic_82xx_calculate_msix_vector(struct qlcnic_adapter *adapter) { int num_msix; num_msix = adapter->drv_sds_rings; if (qlcnic_check_multi_tx(adapter)) num_msix += adapter->drv_tx_rings; else num_msix += QLCNIC_SINGLE_RING; return num_msix; } static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter) { int err = 0; u32 offset, mask_reg; const struct qlcnic_legacy_intr_set *legacy_intrp; struct qlcnic_hardware_context *ahw = adapter->ahw; struct pci_dev *pdev = adapter->pdev; if (qlcnic_use_msi && !pci_enable_msi(pdev)) { adapter->flags |= QLCNIC_MSI_ENABLED; offset = msi_tgt_status[adapter->ahw->pci_func]; adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter->ahw, offset); dev_info(&pdev->dev, "using msi interrupts\n"); adapter->msix_entries[0].vector = pdev->irq; return err; } if (qlcnic_use_msi || qlcnic_use_msi_x) return -EOPNOTSUPP; legacy_intrp = &legacy_intr[adapter->ahw->pci_func]; adapter->ahw->int_vec_bit = legacy_intrp->int_vec_bit; offset = legacy_intrp->tgt_status_reg; adapter->tgt_status_reg = qlcnic_get_ioaddr(ahw, offset); mask_reg = legacy_intrp->tgt_mask_reg; adapter->tgt_mask_reg = qlcnic_get_ioaddr(ahw, mask_reg); adapter->isr_int_vec = qlcnic_get_ioaddr(ahw, ISR_INT_VECTOR); adapter->crb_int_state_reg = qlcnic_get_ioaddr(ahw, ISR_INT_STATE_REG); dev_info(&pdev->dev, "using legacy interrupts\n"); adapter->msix_entries[0].vector = pdev->irq; return err; } static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter) { int num_msix, err = 0; if (adapter->flags & QLCNIC_TSS_RSS) { err = qlcnic_setup_tss_rss_intr(adapter); if (err < 0) return err; num_msix = adapter->ahw->num_msix; } else { num_msix = qlcnic_82xx_calculate_msix_vector(adapter); err = qlcnic_enable_msix(adapter, 
num_msix); if (err == -ENOMEM) return err; if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { qlcnic_disable_multi_tx(adapter); adapter->drv_sds_rings = QLCNIC_SINGLE_RING; err = qlcnic_enable_msi_legacy(adapter); if (err) return err; } } return 0; } int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *adapter, int op_type) { struct qlcnic_hardware_context *ahw = adapter->ahw; int err, i; if (qlcnic_check_multi_tx(adapter) && !ahw->diag_test && (adapter->flags & QLCNIC_MSIX_ENABLED)) { ahw->intr_tbl = vzalloc(array_size(sizeof(struct qlcnic_intrpt_config), ahw->num_msix)); if (!ahw->intr_tbl) return -ENOMEM; for (i = 0; i < ahw->num_msix; i++) { ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX; ahw->intr_tbl[i].id = i; ahw->intr_tbl[i].src = 0; } err = qlcnic_82xx_config_intrpt(adapter, 1); if (err) dev_err(&adapter->pdev->dev, "Failed to configure Interrupt for %d vector\n", ahw->num_msix); return err; } return 0; } void qlcnic_teardown_intr(struct qlcnic_adapter *adapter) { if (adapter->flags & QLCNIC_MSIX_ENABLED) pci_disable_msix(adapter->pdev); if (adapter->flags & QLCNIC_MSI_ENABLED) pci_disable_msi(adapter->pdev); kfree(adapter->msix_entries); adapter->msix_entries = NULL; if (adapter->ahw->intr_tbl) { vfree(adapter->ahw->intr_tbl); adapter->ahw->intr_tbl = NULL; } } static void qlcnic_cleanup_pci_map(struct qlcnic_hardware_context *ahw) { if (ahw->pci_base0 != NULL) iounmap(ahw->pci_base0); } static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_pci_info *pci_info; int ret; if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) { switch (ahw->port_type) { case QLCNIC_GBE: ahw->total_nic_func = QLCNIC_NIU_MAX_GBE_PORTS; break; case QLCNIC_XGBE: ahw->total_nic_func = QLCNIC_NIU_MAX_XG_PORTS; break; } return 0; } if (ahw->op_mode == QLCNIC_MGMT_FUNC) return 0; pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL); if (!pci_info) return -ENOMEM; ret = qlcnic_get_pci_info(adapter, pci_info); kfree(pci_info); return ret; } static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter) { bool ret = false; if (qlcnic_84xx_check(adapter)) { ret = true; } else if (qlcnic_83xx_check(adapter)) { if (adapter->ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG) ret = true; else ret = false; } return ret; } int qlcnic_init_pci_info(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_pci_info *pci_info; int i, id = 0, ret = 0, j = 0; u16 act_pci_func; u8 pfn; pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL); if (!pci_info) return -ENOMEM; ret = qlcnic_get_pci_info(adapter, pci_info); if (ret) goto err_pci_info; act_pci_func = ahw->total_nic_func; adapter->npars = kcalloc(act_pci_func, sizeof(struct qlcnic_npar_info), GFP_KERNEL); if (!adapter->npars) { ret = -ENOMEM; goto err_pci_info; } adapter->eswitch = kcalloc(QLCNIC_NIU_MAX_XG_PORTS, sizeof(struct qlcnic_eswitch), GFP_KERNEL); if (!adapter->eswitch) { ret = -ENOMEM; goto err_npars; } for (i = 0; i < ahw->max_vnic_func; i++) { pfn = pci_info[i].id; if (pfn >= ahw->max_vnic_func) { ret = -EINVAL; dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n", __func__, pfn, ahw->max_vnic_func); goto err_eswitch; } if (!pci_info[i].active || (pci_info[i].type != QLCNIC_TYPE_NIC)) continue; if (qlcnic_port_eswitch_cfg_capability(adapter)) { if (!qlcnic_83xx_set_port_eswitch_status(adapter, pfn, &id)) adapter->npars[j].eswitch_status = true; else continue; } else 
{ adapter->npars[j].eswitch_status = true; } adapter->npars[j].pci_func = pfn; adapter->npars[j].active = (u8)pci_info[i].active; adapter->npars[j].type = (u8)pci_info[i].type; adapter->npars[j].phy_port = (u8)pci_info[i].default_port; adapter->npars[j].min_bw = pci_info[i].tx_min_bw; adapter->npars[j].max_bw = pci_info[i].tx_max_bw; memcpy(&adapter->npars[j].mac, &pci_info[i].mac, ETH_ALEN); j++; } /* Update eSwitch status for adapters without per port eSwitch * configuration capability */ if (!qlcnic_port_eswitch_cfg_capability(adapter)) { for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE; } kfree(pci_info); return 0; err_eswitch: kfree(adapter->eswitch); adapter->eswitch = NULL; err_npars: kfree(adapter->npars); adapter->npars = NULL; err_pci_info: kfree(pci_info); return ret; } static int qlcnic_set_function_modes(struct qlcnic_adapter *adapter) { u8 id; int ret; u32 data = QLCNIC_MGMT_FUNC; struct qlcnic_hardware_context *ahw = adapter->ahw; ret = qlcnic_api_lock(adapter); if (ret) goto err_lock; id = ahw->pci_func; data = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE); data = (data & ~QLC_DEV_SET_DRV(0xf, id)) | QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, id); QLC_SHARED_REG_WR32(adapter, QLCNIC_DRV_OP_MODE, data); qlcnic_api_unlock(adapter); err_lock: return ret; } static void qlcnic_check_vf(struct qlcnic_adapter *adapter, const struct pci_device_id *ent) { u32 op_mode, priv_level; /* Determine FW API version */ adapter->ahw->fw_hal_version = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_API); /* Find PCI function number */ qlcnic_get_func_no(adapter); /* Determine function privilege level */ op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE); if (op_mode == QLC_DEV_DRV_DEFAULT) priv_level = QLCNIC_MGMT_FUNC; else priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func); if (priv_level == QLCNIC_NON_PRIV_FUNC) { adapter->ahw->op_mode = QLCNIC_NON_PRIV_FUNC; dev_info(&adapter->pdev->dev, "HAL Version: %d Non Privileged function\n", adapter->ahw->fw_hal_version); adapter->nic_ops = &qlcnic_vf_ops; } else adapter->nic_ops = &qlcnic_ops; } #define QLCNIC_82XX_BAR0_LENGTH 0x00200000UL #define QLCNIC_83XX_BAR0_LENGTH 0x4000 static void qlcnic_get_bar_length(u32 dev_id, ulong *bar) { switch (dev_id) { case PCI_DEVICE_ID_QLOGIC_QLE824X: *bar = QLCNIC_82XX_BAR0_LENGTH; break; case PCI_DEVICE_ID_QLOGIC_QLE834X: case PCI_DEVICE_ID_QLOGIC_QLE8830: case PCI_DEVICE_ID_QLOGIC_QLE844X: case PCI_DEVICE_ID_QLOGIC_VF_QLE834X: case PCI_DEVICE_ID_QLOGIC_VF_QLE844X: case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30: *bar = QLCNIC_83XX_BAR0_LENGTH; break; default: *bar = 0; } } static int qlcnic_setup_pci_map(struct pci_dev *pdev, struct qlcnic_hardware_context *ahw) { u32 offset; void __iomem *mem_ptr0 = NULL; unsigned long mem_len, pci_len0 = 0, bar0_len; /* remap phys address */ mem_len = pci_resource_len(pdev, 0); qlcnic_get_bar_length(pdev->device, &bar0_len); if (mem_len >= bar0_len) { mem_ptr0 = pci_ioremap_bar(pdev, 0); if (mem_ptr0 == NULL) { dev_err(&pdev->dev, "failed to map PCI bar 0\n"); return -EIO; } pci_len0 = mem_len; } else { return -EIO; } dev_info(&pdev->dev, "%dKB memory map\n", (int)(mem_len >> 10)); ahw->pci_base0 = mem_ptr0; ahw->pci_len0 = pci_len0; offset = QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(ahw->pci_func)); qlcnic_get_ioaddr(ahw, offset); return 0; } static bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter, int index) { struct pci_dev *pdev = adapter->pdev; unsigned short subsystem_vendor; bool ret = true; subsystem_vendor = 
pdev->subsystem_vendor; if (pdev->device == PCI_DEVICE_ID_QLOGIC_QLE824X || pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X) { if (qlcnic_boards[index].sub_vendor == subsystem_vendor && qlcnic_boards[index].sub_device == pdev->subsystem_device) ret = true; else ret = false; } return ret; } static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name) { struct pci_dev *pdev = adapter->pdev; int i, found = 0; for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) { if (qlcnic_boards[i].vendor == pdev->vendor && qlcnic_boards[i].device == pdev->device && qlcnic_validate_subsystem_id(adapter, i)) { found = 1; break; } } if (!found) sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr); else sprintf(name, "%pM: %s" , adapter->mac_addr, qlcnic_boards[i].short_name); } static void qlcnic_check_options(struct qlcnic_adapter *adapter) { int err; u32 fw_major, fw_minor, fw_build, prev_fw_version; struct pci_dev *pdev = adapter->pdev; struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump; prev_fw_version = adapter->fw_version; fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR); fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR); fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB); adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build); err = qlcnic_get_board_info(adapter); if (err) { dev_err(&pdev->dev, "Error getting board config info.\n"); return; } if (ahw->op_mode != QLCNIC_NON_PRIV_FUNC) { if (fw_dump->tmpl_hdr == NULL || adapter->fw_version > prev_fw_version) { vfree(fw_dump->tmpl_hdr); if (!qlcnic_fw_cmd_get_minidump_temp(adapter)) dev_info(&pdev->dev, "Supports FW dump capability\n"); } } dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d\n", QLCNIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build); if (adapter->ahw->port_type == QLCNIC_XGBE) { if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF; adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF; } else { adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G; adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G; } adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; } else if (adapter->ahw->port_type == QLCNIC_GBE) { adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G; adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G; } adapter->ahw->msix_supported = !!qlcnic_use_msi_x; adapter->num_txd = MAX_CMD_DESCRIPTORS; adapter->max_rds_rings = MAX_RDS_RINGS; } static int qlcnic_initialize_nic(struct qlcnic_adapter *adapter) { struct qlcnic_info nic_info; int err = 0; memset(&nic_info, 0, sizeof(struct qlcnic_info)); err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func); if (err) return err; adapter->ahw->physical_port = (u8)nic_info.phys_port; adapter->ahw->switch_mode = nic_info.switch_mode; adapter->ahw->max_tx_ques = nic_info.max_tx_ques; adapter->ahw->max_rx_ques = nic_info.max_rx_ques; adapter->ahw->capabilities = nic_info.capabilities; if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) { u32 temp; temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2, &err); if (err == -EIO) return err; adapter->ahw->extra_capability[0] = temp; } else { adapter->ahw->extra_capability[0] = 0; } adapter->ahw->max_mac_filters = nic_info.max_mac_filters; adapter->ahw->max_mtu = nic_info.max_mtu; if (adapter->ahw->capabilities & BIT_6) { adapter->flags |= 
QLCNIC_ESWITCH_ENABLED; adapter->ahw->nic_mode = QLCNIC_VNIC_MODE; adapter->max_tx_rings = QLCNIC_MAX_HW_VNIC_TX_RINGS; adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS; dev_info(&adapter->pdev->dev, "vNIC mode enabled.\n"); } else { adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE; adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS; adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS; adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; } return err; } void qlcnic_set_vlan_config(struct qlcnic_adapter *adapter, struct qlcnic_esw_func_cfg *esw_cfg) { if (esw_cfg->discard_tagged) adapter->flags &= ~QLCNIC_TAGGING_ENABLED; else adapter->flags |= QLCNIC_TAGGING_ENABLED; if (esw_cfg->vlan_id) { adapter->rx_pvid = esw_cfg->vlan_id; adapter->tx_pvid = esw_cfg->vlan_id; } else { adapter->rx_pvid = 0; adapter->tx_pvid = 0; } } static int qlcnic_vlan_rx_add(struct net_device *netdev, __be16 proto, u16 vid) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int err; if (qlcnic_sriov_vf_check(adapter)) { err = qlcnic_sriov_cfg_vf_guest_vlan(adapter, vid, 1); if (err) { netdev_err(netdev, "Cannot add VLAN filter for VLAN id %d, err=%d", vid, err); return err; } } set_bit(vid, adapter->vlans); return 0; } static int qlcnic_vlan_rx_del(struct net_device *netdev, __be16 proto, u16 vid) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int err; if (qlcnic_sriov_vf_check(adapter)) { err = qlcnic_sriov_cfg_vf_guest_vlan(adapter, vid, 0); if (err) { netdev_err(netdev, "Cannot delete VLAN filter for VLAN id %d, err=%d", vid, err); return err; } } qlcnic_restore_indev_addr(netdev, NETDEV_DOWN); clear_bit(vid, adapter->vlans); return 0; } void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter, struct qlcnic_esw_func_cfg *esw_cfg) { adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED | QLCNIC_PROMISC_DISABLED); if (esw_cfg->mac_anti_spoof) adapter->flags |= QLCNIC_MACSPOOF; if (!esw_cfg->mac_override) adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED; if (!esw_cfg->promisc_mode) adapter->flags |= QLCNIC_PROMISC_DISABLED; } int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter) { struct qlcnic_esw_func_cfg esw_cfg; if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) return 0; esw_cfg.pci_func = adapter->ahw->pci_func; if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg)) return -EIO; qlcnic_set_vlan_config(adapter, &esw_cfg); qlcnic_set_eswitch_port_features(adapter, &esw_cfg); qlcnic_set_netdev_features(adapter, &esw_cfg); return 0; } void qlcnic_set_netdev_features(struct qlcnic_adapter *adapter, struct qlcnic_esw_func_cfg *esw_cfg) { struct net_device *netdev = adapter->netdev; if (qlcnic_83xx_check(adapter)) return; adapter->offload_flags = esw_cfg->offload_flags; adapter->flags |= QLCNIC_APP_CHANGED_FLAGS; netdev_update_features(netdev); adapter->flags &= ~QLCNIC_APP_CHANGED_FLAGS; } static int qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter) { u32 op_mode, priv_level; int err = 0; err = qlcnic_initialize_nic(adapter); if (err) return err; if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED) return 0; op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE); priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func); if (op_mode == QLC_DEV_DRV_DEFAULT) priv_level = QLCNIC_MGMT_FUNC; else priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func); if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { if (priv_level == QLCNIC_MGMT_FUNC) { adapter->ahw->op_mode = QLCNIC_MGMT_FUNC; err = qlcnic_init_pci_info(adapter); if (err) return err; /* Set privilege level for other functions 
*/ qlcnic_set_function_modes(adapter); dev_info(&adapter->pdev->dev, "HAL Version: %d, Management function\n", adapter->ahw->fw_hal_version); } else if (priv_level == QLCNIC_PRIV_FUNC) { adapter->ahw->op_mode = QLCNIC_PRIV_FUNC; dev_info(&adapter->pdev->dev, "HAL Version: %d, Privileged function\n", adapter->ahw->fw_hal_version); } } else { adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE; } adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; return err; } int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter) { struct qlcnic_esw_func_cfg esw_cfg; struct qlcnic_npar_info *npar; u8 i; if (adapter->need_fw_reset) return 0; for (i = 0; i < adapter->ahw->total_nic_func; i++) { if (!adapter->npars[i].eswitch_status) continue; memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg)); esw_cfg.pci_func = adapter->npars[i].pci_func; esw_cfg.mac_override = BIT_0; esw_cfg.promisc_mode = BIT_0; if (qlcnic_82xx_check(adapter)) { esw_cfg.offload_flags = BIT_0; if (QLCNIC_IS_TSO_CAPABLE(adapter)) esw_cfg.offload_flags |= (BIT_1 | BIT_2); } if (qlcnic_config_switch_port(adapter, &esw_cfg)) return -EIO; npar = &adapter->npars[i]; npar->pvid = esw_cfg.vlan_id; npar->mac_override = esw_cfg.mac_override; npar->mac_anti_spoof = esw_cfg.mac_anti_spoof; npar->discard_tagged = esw_cfg.discard_tagged; npar->promisc_mode = esw_cfg.promisc_mode; npar->offload_flags = esw_cfg.offload_flags; } return 0; } static int qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter, struct qlcnic_npar_info *npar, int pci_func) { struct qlcnic_esw_func_cfg esw_cfg; esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS; esw_cfg.pci_func = pci_func; esw_cfg.vlan_id = npar->pvid; esw_cfg.mac_override = npar->mac_override; esw_cfg.discard_tagged = npar->discard_tagged; esw_cfg.mac_anti_spoof = npar->mac_anti_spoof; esw_cfg.offload_flags = npar->offload_flags; esw_cfg.promisc_mode = npar->promisc_mode; if (qlcnic_config_switch_port(adapter, &esw_cfg)) return -EIO; esw_cfg.op_mode = QLCNIC_ADD_VLAN; if (qlcnic_config_switch_port(adapter, &esw_cfg)) return -EIO; return 0; } int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter) { int i, err; struct qlcnic_npar_info *npar; struct qlcnic_info nic_info; u8 pci_func; if (qlcnic_82xx_check(adapter)) if (!adapter->need_fw_reset) return 0; /* Set the NPAR config data after FW reset */ for (i = 0; i < adapter->ahw->total_nic_func; i++) { npar = &adapter->npars[i]; pci_func = npar->pci_func; if (!adapter->npars[i].eswitch_status) continue; memset(&nic_info, 0, sizeof(struct qlcnic_info)); err = qlcnic_get_nic_info(adapter, &nic_info, pci_func); if (err) return err; nic_info.min_tx_bw = npar->min_bw; nic_info.max_tx_bw = npar->max_bw; err = qlcnic_set_nic_info(adapter, &nic_info); if (err) return err; if (npar->enable_pm) { err = qlcnic_config_port_mirroring(adapter, npar->dest_npar, 1, pci_func); if (err) return err; } err = qlcnic_reset_eswitch_config(adapter, npar, pci_func); if (err) return err; } return 0; } static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter) { u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO; u32 npar_state; if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) return 0; npar_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) { msleep(1000); npar_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); } if (!npar_opt_timeo) { dev_err(&adapter->pdev->dev, "Waiting for NPAR state to operational timeout\n"); return -EIO; } return 0; } static int qlcnic_set_mgmt_operations(struct 
qlcnic_adapter *adapter) { int err; if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) || adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) return 0; err = qlcnic_set_default_offload_settings(adapter); if (err) return err; err = qlcnic_reset_npar_config(adapter); if (err) return err; qlcnic_dev_set_npar_ready(adapter); return err; } static int qlcnic_82xx_start_firmware(struct qlcnic_adapter *adapter) { int err; err = qlcnic_can_start_firmware(adapter); if (err < 0) return err; else if (!err) goto check_fw_status; if (qlcnic_load_fw_file) qlcnic_request_firmware(adapter); else { err = qlcnic_check_flash_fw_ver(adapter); if (err) goto err_out; adapter->ahw->fw_type = QLCNIC_FLASH_ROMIMAGE; } err = qlcnic_need_fw_reset(adapter); if (err == 0) goto check_fw_status; err = qlcnic_pinit_from_rom(adapter); if (err) goto err_out; err = qlcnic_load_firmware(adapter); if (err) goto err_out; qlcnic_release_firmware(adapter); QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION); check_fw_status: err = qlcnic_check_fw_status(adapter); if (err) goto err_out; QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY); qlcnic_idc_debug_info(adapter, 1); err = qlcnic_check_eswitch_mode(adapter); if (err) { dev_err(&adapter->pdev->dev, "Memory allocation failed for eswitch\n"); goto err_out; } err = qlcnic_set_mgmt_operations(adapter); if (err) goto err_out; qlcnic_check_options(adapter); adapter->need_fw_reset = 0; qlcnic_release_firmware(adapter); return 0; err_out: QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED); dev_err(&adapter->pdev->dev, "Device state set to failed\n"); qlcnic_release_firmware(adapter); return err; } static int qlcnic_request_irq(struct qlcnic_adapter *adapter) { irq_handler_t handler; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_host_tx_ring *tx_ring; int err, ring, num_sds_rings; unsigned long flags = 0; struct net_device *netdev = adapter->netdev; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { if (qlcnic_82xx_check(adapter)) handler = qlcnic_tmp_intr; else handler = qlcnic_83xx_tmp_intr; if (!QLCNIC_IS_MSI_FAMILY(adapter)) flags |= IRQF_SHARED; } else { if (adapter->flags & QLCNIC_MSIX_ENABLED) handler = qlcnic_msix_intr; else if (adapter->flags & QLCNIC_MSI_ENABLED) handler = qlcnic_msi_intr; else { flags |= IRQF_SHARED; if (qlcnic_82xx_check(adapter)) handler = qlcnic_intr; else handler = qlcnic_83xx_intr; } } adapter->irq = netdev->irq; if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) { if (qlcnic_82xx_check(adapter) || (qlcnic_83xx_check(adapter) && (adapter->flags & QLCNIC_MSIX_ENABLED))) { num_sds_rings = adapter->drv_sds_rings; for (ring = 0; ring < num_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; if (qlcnic_82xx_check(adapter) && !qlcnic_check_multi_tx(adapter) && (ring == (num_sds_rings - 1))) { if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) snprintf(sds_ring->name, sizeof(sds_ring->name), "qlcnic"); else snprintf(sds_ring->name, sizeof(sds_ring->name), "%s-tx-0-rx-%d", netdev->name, ring); } else { snprintf(sds_ring->name, sizeof(sds_ring->name), "%s-rx-%d", netdev->name, ring); } err = request_irq(sds_ring->irq, handler, flags, sds_ring->name, sds_ring); if (err) return err; } } if ((qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter)) || (qlcnic_83xx_check(adapter) && (adapter->flags & QLCNIC_MSIX_ENABLED) && !(adapter->flags & QLCNIC_TX_INTR_SHARED))) { handler = qlcnic_msix_tx_intr; for (ring = 0; ring < adapter->drv_tx_rings; ring++) { 
tx_ring = &adapter->tx_ring[ring]; snprintf(tx_ring->name, sizeof(tx_ring->name), "%s-tx-%d", netdev->name, ring); err = request_irq(tx_ring->irq, handler, flags, tx_ring->name, tx_ring); if (err) return err; } } } return 0; } static void qlcnic_free_irq(struct qlcnic_adapter *adapter) { int ring; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) { if (qlcnic_82xx_check(adapter) || (qlcnic_83xx_check(adapter) && (adapter->flags & QLCNIC_MSIX_ENABLED))) { for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; free_irq(sds_ring->irq, sds_ring); } } if ((qlcnic_83xx_check(adapter) && !(adapter->flags & QLCNIC_TX_INTR_SHARED)) || (qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) { for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; if (tx_ring->irq) free_irq(tx_ring->irq, tx_ring); } } } } static void qlcnic_get_lro_mss_capability(struct qlcnic_adapter *adapter) { u32 capab = 0; if (qlcnic_82xx_check(adapter)) { if (adapter->ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG) adapter->flags |= QLCNIC_FW_LRO_MSS_CAP; } else { capab = adapter->ahw->capabilities; if (QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(capab)) adapter->flags |= QLCNIC_FW_LRO_MSS_CAP; } } static int qlcnic_config_def_intr_coalesce(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; int err; /* Initialize interrupt coalesce parameters */ ahw->coal.flag = QLCNIC_INTR_DEFAULT; if (qlcnic_83xx_check(adapter)) { ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX; ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US; ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS; ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US; ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS; err = qlcnic_83xx_set_rx_tx_intr_coal(adapter); } else { ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX; ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US; ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS; err = qlcnic_82xx_set_rx_coalesce(adapter); } return err; } int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) { int ring; struct qlcnic_host_rds_ring *rds_ring; if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) return -EIO; if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) return 0; if (qlcnic_set_eswitch_port_config(adapter)) return -EIO; qlcnic_get_lro_mss_capability(adapter); if (qlcnic_fw_create_ctx(adapter)) return -EIO; for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &adapter->recv_ctx->rds_rings[ring]; qlcnic_post_rx_buffers(adapter, rds_ring, ring); } qlcnic_set_multi(netdev); qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu); adapter->ahw->linkup = 0; if (adapter->drv_sds_rings > 1) qlcnic_config_rss(adapter, 1); qlcnic_config_def_intr_coalesce(adapter); if (netdev->features & NETIF_F_LRO) qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED); set_bit(__QLCNIC_DEV_UP, &adapter->state); qlcnic_napi_enable(adapter); qlcnic_linkevent_request(adapter, 1); adapter->ahw->reset_context = 0; netif_tx_start_all_queues(netdev); return 0; } int qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) { int err = 0; rtnl_lock(); if (netif_running(netdev)) err = __qlcnic_up(adapter, netdev); rtnl_unlock(); return err; } void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) { int ring; if 
(adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) return; if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state)) return; smp_mb(); netif_carrier_off(netdev); adapter->ahw->linkup = 0; netif_tx_disable(netdev); qlcnic_free_mac_list(adapter); if (adapter->fhash.fnum) qlcnic_delete_lb_filters(adapter); qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE); if (qlcnic_sriov_vf_check(adapter)) qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc); qlcnic_napi_disable(adapter); qlcnic_fw_destroy_ctx(adapter); adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP; qlcnic_reset_rx_buffers_list(adapter); for (ring = 0; ring < adapter->drv_tx_rings; ring++) qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]); } /* Usage: During suspend and firmware recovery module */ void qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) { rtnl_lock(); if (netif_running(netdev)) __qlcnic_down(adapter, netdev); rtnl_unlock(); } int qlcnic_attach(struct qlcnic_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; int err; if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) return 0; err = qlcnic_napi_add(adapter, netdev); if (err) return err; err = qlcnic_alloc_sw_resources(adapter); if (err) { dev_err(&pdev->dev, "Error in setting sw resources\n"); goto err_out_napi_del; } err = qlcnic_alloc_hw_resources(adapter); if (err) { dev_err(&pdev->dev, "Error in setting hw resources\n"); goto err_out_free_sw; } err = qlcnic_request_irq(adapter); if (err) { dev_err(&pdev->dev, "failed to setup interrupt\n"); goto err_out_free_hw; } qlcnic_create_sysfs_entries(adapter); if (qlcnic_encap_rx_offload(adapter)) udp_tunnel_nic_reset_ntf(netdev); adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC; return 0; err_out_free_hw: qlcnic_free_hw_resources(adapter); err_out_free_sw: qlcnic_free_sw_resources(adapter); err_out_napi_del: qlcnic_napi_del(adapter); return err; } void qlcnic_detach(struct qlcnic_adapter *adapter) { if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) return; qlcnic_remove_sysfs_entries(adapter); qlcnic_free_hw_resources(adapter); qlcnic_release_rx_buffers(adapter); qlcnic_free_irq(adapter); qlcnic_napi_del(adapter); qlcnic_free_sw_resources(adapter); adapter->is_up = 0; } void qlcnic_diag_free_res(struct net_device *netdev, int drv_sds_rings) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_host_sds_ring *sds_ring; int drv_tx_rings = adapter->drv_tx_rings; int ring; clear_bit(__QLCNIC_DEV_UP, &adapter->state); if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &adapter->recv_ctx->sds_rings[ring]; qlcnic_disable_sds_intr(adapter, sds_ring); } } qlcnic_fw_destroy_ctx(adapter); qlcnic_detach(adapter); adapter->ahw->diag_test = 0; adapter->drv_sds_rings = drv_sds_rings; adapter->drv_tx_rings = drv_tx_rings; if (qlcnic_attach(adapter)) goto out; if (netif_running(netdev)) __qlcnic_up(adapter, netdev); out: netif_device_attach(netdev); } static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; int err = 0; adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context), GFP_KERNEL); if (!adapter->recv_ctx) { err = -ENOMEM; goto err_out; } if (qlcnic_83xx_check(adapter)) { ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX; ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US; ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS; ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US; 
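/*
 * Editor's note (illustrative, not upstream code): these are the initial
 * interrupt-coalescing defaults cached in ahw->coal.  The *_time_us fields
 * bound how long the hardware may delay raising an interrupt and the
 * *_packets fields bound how many frames may be batched before one is
 * raised; 83xx parts coalesce both Rx and Tx, while the 82xx branch below
 * only programs Rx.  The same defaults are programmed into firmware by
 * qlcnic_config_def_intr_coalesce() when the interface is brought up, and
 * can typically be inspected or adjusted at run time through the standard
 * ethtool coalescing interface, e.g. "ethtool -c <ifname>" or
 * "ethtool -C <ifname> rx-usecs <n> rx-frames <m>".
 */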
ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS; } else { ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX; ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US; ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS; } /* clear stats */ memset(&adapter->stats, 0, sizeof(adapter->stats)); err_out: return err; } static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter) { struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; kfree(adapter->recv_ctx); adapter->recv_ctx = NULL; if (fw_dump->tmpl_hdr) { vfree(fw_dump->tmpl_hdr); fw_dump->tmpl_hdr = NULL; } if (fw_dump->dma_buffer) { dma_free_coherent(&adapter->pdev->dev, QLC_PEX_DMA_READ_SIZE, fw_dump->dma_buffer, fw_dump->phys_addr); fw_dump->dma_buffer = NULL; } kfree(adapter->ahw->reset.buff); adapter->ahw->fw_dump.tmpl_hdr = NULL; } int qlcnic_diag_alloc_res(struct net_device *netdev, int test) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_host_rds_ring *rds_ring; int ring; int ret; netif_device_detach(netdev); if (netif_running(netdev)) __qlcnic_down(adapter, netdev); qlcnic_detach(adapter); adapter->drv_sds_rings = QLCNIC_SINGLE_RING; adapter->ahw->diag_test = test; adapter->ahw->linkup = 0; ret = qlcnic_attach(adapter); if (ret) { netif_device_attach(netdev); return ret; } ret = qlcnic_fw_create_ctx(adapter); if (ret) { qlcnic_detach(adapter); netif_device_attach(netdev); return ret; } for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &adapter->recv_ctx->rds_rings[ring]; qlcnic_post_rx_buffers(adapter, rds_ring, ring); } if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &adapter->recv_ctx->sds_rings[ring]; qlcnic_enable_sds_intr(adapter, sds_ring); } } if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { adapter->ahw->loopback_state = 0; qlcnic_linkevent_request(adapter, 1); } set_bit(__QLCNIC_DEV_UP, &adapter->state); return 0; } /* Reset context in hardware only */ static int qlcnic_reset_hw_context(struct qlcnic_adapter *adapter) { struct net_device *netdev = adapter->netdev; if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) return -EBUSY; netif_device_detach(netdev); qlcnic_down(adapter, netdev); qlcnic_up(adapter, netdev); netif_device_attach(netdev); clear_bit(__QLCNIC_RESETTING, &adapter->state); netdev_info(adapter->netdev, "%s: soft reset complete\n", __func__); return 0; } int qlcnic_reset_context(struct qlcnic_adapter *adapter) { int err = 0; struct net_device *netdev = adapter->netdev; if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) return -EBUSY; if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) { netif_device_detach(netdev); if (netif_running(netdev)) __qlcnic_down(adapter, netdev); qlcnic_detach(adapter); if (netif_running(netdev)) { err = qlcnic_attach(adapter); if (!err) { __qlcnic_up(adapter, netdev); qlcnic_restore_indev_addr(netdev, NETDEV_UP); } } netif_device_attach(netdev); } clear_bit(__QLCNIC_RESETTING, &adapter->state); return err; } static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; u16 act_pci_fn = ahw->total_nic_func; u16 count; ahw->max_mc_count = QLCNIC_MAX_MC_COUNT; if (act_pci_fn <= 2) count = (QLCNIC_MAX_UC_COUNT - QLCNIC_MAX_MC_COUNT) / act_pci_fn; else count = (QLCNIC_LB_MAX_FILTERS - QLCNIC_MAX_MC_COUNT) / act_pci_fn; ahw->max_uc_count = count; } static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter, u8 
tx_queues, u8 rx_queues) { struct net_device *netdev = adapter->netdev; int err = 0; if (tx_queues) { err = netif_set_real_num_tx_queues(netdev, tx_queues); if (err) { netdev_err(netdev, "failed to set %d Tx queues\n", tx_queues); return err; } } if (rx_queues) { err = netif_set_real_num_rx_queues(netdev, rx_queues); if (err) netdev_err(netdev, "failed to set %d Rx queues\n", rx_queues); } return err; } int qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev) { int err; struct pci_dev *pdev = adapter->pdev; adapter->rx_csum = 1; adapter->ahw->mc_enabled = 0; qlcnic_set_mac_filter_count(adapter); netdev->netdev_ops = &qlcnic_netdev_ops; netdev->watchdog_timeo = QLCNIC_WATCHDOG_TIMEOUTVALUE * HZ; qlcnic_change_mtu(netdev, netdev->mtu); netdev->ethtool_ops = (qlcnic_sriov_vf_check(adapter)) ? &qlcnic_sriov_vf_ethtool_ops : &qlcnic_ethtool_ops; netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA); netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA); if (QLCNIC_IS_TSO_CAPABLE(adapter)) { netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6); netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6); } if (qlcnic_vlan_tx_check(adapter)) netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX); if (qlcnic_sriov_vf_check(adapter)) netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) netdev->features |= NETIF_F_LRO; if (qlcnic_encap_tx_offload(adapter)) { netdev->features |= NETIF_F_GSO_UDP_TUNNEL; /* encapsulation Tx offload supported by Adapter */ netdev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_TSO | NETIF_F_TSO6; } if (qlcnic_encap_rx_offload(adapter)) { netdev->hw_enc_features |= NETIF_F_RXCSUM; netdev->udp_tunnel_nic_info = &qlcnic_udp_tunnels; } netdev->hw_features = netdev->features; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->irq = adapter->msix_entries[0].vector; /* MTU range: 68 - 9600 */ netdev->min_mtu = P3P_MIN_MTU; netdev->max_mtu = P3P_MAX_MTU; err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings, adapter->drv_sds_rings); if (err) return err; qlcnic_dcb_init_dcbnl_ops(adapter->dcb); err = register_netdev(netdev); if (err) { dev_err(&pdev->dev, "failed to register net device\n"); return err; } return 0; } void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter) { int ring; struct qlcnic_host_tx_ring *tx_ring; for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; if (tx_ring) { vfree(tx_ring->cmd_buf_arr); tx_ring->cmd_buf_arr = NULL; } } kfree(adapter->tx_ring); } int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter, struct net_device *netdev) { int ring, vector, index; struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_cmd_buffer *cmd_buf_arr; tx_ring = kcalloc(adapter->drv_tx_rings, sizeof(struct qlcnic_host_tx_ring), GFP_KERNEL); if (tx_ring == NULL) return -ENOMEM; adapter->tx_ring = tx_ring; for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; tx_ring->num_desc = adapter->num_txd; tx_ring->txq = netdev_get_tx_queue(netdev, ring); cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring)); if (cmd_buf_arr == NULL) { qlcnic_free_tx_rings(adapter); return -ENOMEM; } tx_ring->cmd_buf_arr = cmd_buf_arr; spin_lock_init(&tx_ring->tx_clean_lock); } if (qlcnic_83xx_check(adapter) || (qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) { for (ring = 0; ring < 
adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; tx_ring->adapter = adapter; if (adapter->flags & QLCNIC_MSIX_ENABLED) { index = adapter->drv_sds_rings + ring; vector = adapter->msix_entries[index].vector; tx_ring->irq = vector; } } } return 0; } void qlcnic_set_drv_version(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; u32 fw_cmd = 0; if (qlcnic_82xx_check(adapter)) fw_cmd = QLCNIC_CMD_82XX_SET_DRV_VER; else if (qlcnic_83xx_check(adapter)) fw_cmd = QLCNIC_CMD_83XX_SET_DRV_VER; if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_SET_DRV_VER) qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd); } /* Reset firmware API lock */ static void qlcnic_reset_api_lock(struct qlcnic_adapter *adapter) { qlcnic_api_lock(adapter); qlcnic_api_unlock(adapter); } static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev = NULL; struct qlcnic_adapter *adapter = NULL; struct qlcnic_hardware_context *ahw; char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */ int err; err = pci_enable_device(pdev); if (err) return err; if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { err = -ENODEV; goto err_out_disable_pdev; } err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n"); goto err_out_disable_pdev; } err = pci_request_regions(pdev, qlcnic_driver_name); if (err) goto err_out_disable_pdev; pci_set_master(pdev); ahw = kzalloc(sizeof(struct qlcnic_hardware_context), GFP_KERNEL); if (!ahw) { err = -ENOMEM; goto err_out_free_res; } switch (ent->device) { case PCI_DEVICE_ID_QLOGIC_QLE824X: ahw->hw_ops = &qlcnic_hw_ops; ahw->reg_tbl = (u32 *) qlcnic_reg_tbl; break; case PCI_DEVICE_ID_QLOGIC_QLE834X: case PCI_DEVICE_ID_QLOGIC_QLE8830: case PCI_DEVICE_ID_QLOGIC_QLE844X: qlcnic_83xx_register_map(ahw); break; case PCI_DEVICE_ID_QLOGIC_VF_QLE834X: case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30: case PCI_DEVICE_ID_QLOGIC_VF_QLE844X: qlcnic_sriov_vf_register_map(ahw); break; default: err = -EINVAL; goto err_out_free_hw_res; } err = qlcnic_setup_pci_map(pdev, ahw); if (err) goto err_out_free_hw_res; netdev = alloc_etherdev_mq(sizeof(struct qlcnic_adapter), QLCNIC_MAX_TX_RINGS); if (!netdev) { err = -ENOMEM; goto err_out_iounmap; } SET_NETDEV_DEV(netdev, &pdev->dev); adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pdev = pdev; adapter->ahw = ahw; adapter->qlcnic_wq = create_singlethread_workqueue("qlcnic"); if (adapter->qlcnic_wq == NULL) { err = -ENOMEM; dev_err(&pdev->dev, "Failed to create workqueue\n"); goto err_out_free_netdev; } err = qlcnic_alloc_adapter_resources(adapter); if (err) goto err_out_free_wq; adapter->dev_rst_time = jiffies; ahw->revision_id = pdev->revision; ahw->max_vnic_func = qlcnic_get_vnic_func_count(adapter); if (qlcnic_mac_learn == FDB_MAC_LEARN) adapter->fdb_mac_learn = true; else if (qlcnic_mac_learn == DRV_MAC_LEARN) adapter->drv_mac_learn = true; rwlock_init(&adapter->ahw->crb_lock); mutex_init(&adapter->ahw->mem_lock); INIT_LIST_HEAD(&adapter->mac_list); qlcnic_register_dcb(adapter); if (qlcnic_82xx_check(adapter)) { qlcnic_check_vf(adapter, ent); adapter->portnum = adapter->ahw->pci_func; qlcnic_reset_api_lock(adapter); err = qlcnic_start_firmware(adapter); if (err) { dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n" "\t\tIf reboot doesn't help, try flashing the card\n"); goto err_out_maintenance_mode; } /* compute and set default and max tx/sds rings */ if (adapter->ahw->msix_supported) 
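/*
 * Editor's note (illustrative, not upstream code): the block below picks
 * the default ring counts for the 82xx probe path.  Roughly:
 *
 *	MSI-X, multi-Tx usable      -> QLCNIC_DEF_TX_RINGS Tx rings,
 *	                               QLCNIC_DEF_SDS_RINGS SDS rings
 *	MSI-X, multi-Tx not usable  -> 1 Tx ring, QLCNIC_DEF_SDS_RINGS
 *	no MSI-X (MSI or INTx)      -> 1 Tx ring, 1 SDS ring
 *
 * (qlcnic_check_multi_tx_capability() returning 1 is the "not usable" case
 * here.)  These are only defaults: qlcnic_setup_rings() further down allows
 * the counts to be changed at run time, subject to the power-of-two and
 * online-CPU checks in qlcnic_validate_rings(), e.g. via
 * "ethtool -L <ifname> rx <n> tx <m>".
 */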
{ if (qlcnic_check_multi_tx_capability(adapter) == 1) qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING); else qlcnic_set_tx_ring_count(adapter, QLCNIC_DEF_TX_RINGS); qlcnic_set_sds_ring_count(adapter, QLCNIC_DEF_SDS_RINGS); } else { qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING); qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING); } err = qlcnic_setup_idc_param(adapter); if (err) goto err_out_free_hw; adapter->flags |= QLCNIC_NEED_FLR; } else if (qlcnic_83xx_check(adapter)) { qlcnic_83xx_check_vf(adapter, ent); adapter->portnum = adapter->ahw->pci_func; err = qlcnic_83xx_init(adapter); if (err) { switch (err) { case -ENOTRECOVERABLE: dev_err(&pdev->dev, "Adapter initialization failed due to a faulty hardware\n"); dev_err(&pdev->dev, "Please replace the adapter with new one and return the faulty adapter for repair\n"); goto err_out_free_hw; case -ENOMEM: dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n"); goto err_out_free_hw; case -EOPNOTSUPP: dev_err(&pdev->dev, "Adapter initialization failed\n"); goto err_out_free_hw; default: dev_err(&pdev->dev, "Adapter initialization failed. Driver will load in maintenance mode to recover the adapter using the application\n"); goto err_out_maintenance_mode; } } if (qlcnic_sriov_vf_check(adapter)) return 0; } else { dev_err(&pdev->dev, "%s: failed. Please Reboot\n", __func__); err = -ENODEV; goto err_out_free_hw; } if (qlcnic_read_mac_addr(adapter)) dev_warn(&pdev->dev, "failed to read mac addr\n"); qlcnic_read_phys_port_id(adapter); if (adapter->portnum == 0) { qlcnic_get_board_name(adapter, board_name); pr_info("%s: %s Board Chip rev 0x%x\n", module_name(THIS_MODULE), board_name, adapter->ahw->revision_id); } if (qlcnic_83xx_check(adapter) && !qlcnic_use_msi_x && !!qlcnic_use_msi) dev_warn(&pdev->dev, "Device does not support MSI interrupts\n"); if (qlcnic_82xx_check(adapter)) { err = qlcnic_dcb_enable(adapter->dcb); if (err) { qlcnic_dcb_free(adapter->dcb); dev_err(&pdev->dev, "Failed to enable DCB\n"); goto err_out_free_hw; } qlcnic_dcb_get_info(adapter->dcb); err = qlcnic_setup_intr(adapter); if (err) { dev_err(&pdev->dev, "Failed to setup interrupt\n"); goto err_out_disable_msi; } } err = qlcnic_get_act_pci_func(adapter); if (err) goto err_out_disable_mbx_intr; if (adapter->portnum == 0) qlcnic_set_drv_version(adapter); err = qlcnic_setup_netdev(adapter, netdev); if (err) goto err_out_disable_mbx_intr; pci_set_drvdata(pdev, adapter); if (qlcnic_82xx_check(adapter)) qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); switch (adapter->ahw->port_type) { case QLCNIC_GBE: dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n", adapter->netdev->name); break; case QLCNIC_XGBE: dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", adapter->netdev->name); break; } if (adapter->drv_mac_learn) qlcnic_alloc_lb_filters_mem(adapter); qlcnic_add_sysfs(adapter); qlcnic_register_hwmon_dev(adapter); return 0; err_out_disable_mbx_intr: if (qlcnic_83xx_check(adapter)) qlcnic_83xx_free_mbx_intr(adapter); err_out_disable_msi: qlcnic_teardown_intr(adapter); qlcnic_cancel_idc_work(adapter); qlcnic_clr_all_drv_state(adapter, 0); err_out_free_hw: qlcnic_free_adapter_resources(adapter); err_out_free_wq: destroy_workqueue(adapter->qlcnic_wq); err_out_free_netdev: free_netdev(netdev); err_out_iounmap: qlcnic_cleanup_pci_map(ahw); err_out_free_hw_res: kfree(ahw); err_out_free_res: pci_release_regions(pdev); err_out_disable_pdev: pci_disable_device(pdev); return err; err_out_maintenance_mode: 
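/*
 * Editor's note (illustrative, not upstream code): this label is the
 * "maintenance mode" fallback taken when firmware bring-up fails in a way
 * the probe messages above describe as recoverable through the management
 * application.  The netdev is still registered, but with the *_failed_ops
 * stubs and with __QLCNIC_MAINTENANCE_MODE set, so the interface and its
 * sysfs entries remain available to recovery tooling while normal use is
 * refused: qlcnic_open() sees the maintenance-mode bit and returns -EIO,
 * so bringing the interface up fails until the adapter is recovered or
 * re-flashed.
 */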
set_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state); netdev->netdev_ops = &qlcnic_netdev_failed_ops; netdev->ethtool_ops = &qlcnic_ethtool_failed_ops; ahw->port_type = QLCNIC_XGBE; if (qlcnic_83xx_check(adapter)) adapter->tgt_status_reg = NULL; else ahw->board_type = QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS; err = register_netdev(netdev); if (err) { dev_err(&pdev->dev, "Failed to register net device\n"); qlcnic_clr_all_drv_state(adapter, 0); goto err_out_free_hw; } pci_set_drvdata(pdev, adapter); qlcnic_add_sysfs(adapter); return 0; } static void qlcnic_remove(struct pci_dev *pdev) { struct qlcnic_adapter *adapter; struct net_device *netdev; struct qlcnic_hardware_context *ahw; adapter = pci_get_drvdata(pdev); if (adapter == NULL) return; netdev = adapter->netdev; qlcnic_cancel_idc_work(adapter); qlcnic_sriov_pf_disable(adapter); ahw = adapter->ahw; unregister_netdev(netdev); qlcnic_sriov_cleanup(adapter); if (qlcnic_83xx_check(adapter)) { qlcnic_83xx_initialize_nic(adapter, 0); cancel_delayed_work_sync(&adapter->idc_aen_work); qlcnic_83xx_free_mbx_intr(adapter); qlcnic_83xx_detach_mailbox_work(adapter); qlcnic_83xx_free_mailbox(ahw->mailbox); kfree(ahw->fw_info); } qlcnic_dcb_free(adapter->dcb); qlcnic_detach(adapter); kfree(adapter->npars); kfree(adapter->eswitch); if (qlcnic_82xx_check(adapter)) qlcnic_clr_all_drv_state(adapter, 0); clear_bit(__QLCNIC_RESETTING, &adapter->state); qlcnic_free_lb_filters_mem(adapter); qlcnic_teardown_intr(adapter); qlcnic_remove_sysfs(adapter); qlcnic_unregister_hwmon_dev(adapter); qlcnic_cleanup_pci_map(adapter->ahw); qlcnic_release_firmware(adapter); pci_release_regions(pdev); pci_disable_device(pdev); if (adapter->qlcnic_wq) { destroy_workqueue(adapter->qlcnic_wq); adapter->qlcnic_wq = NULL; } qlcnic_free_adapter_resources(adapter); kfree(ahw); free_netdev(netdev); } static void qlcnic_shutdown(struct pci_dev *pdev) { if (__qlcnic_shutdown(pdev)) return; pci_disable_device(pdev); } static int __maybe_unused qlcnic_suspend(struct device *dev_d) { return __qlcnic_shutdown(to_pci_dev(dev_d)); } static int __maybe_unused qlcnic_resume(struct device *dev_d) { struct qlcnic_adapter *adapter = dev_get_drvdata(dev_d); return __qlcnic_resume(adapter); } static int qlcnic_open(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int err; if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) { netdev_err(netdev, "%s: Device is in non-operational state\n", __func__); return -EIO; } netif_carrier_off(netdev); err = qlcnic_attach(adapter); if (err) return err; err = __qlcnic_up(adapter, netdev); if (err) qlcnic_detach(adapter); return err; } /* * qlcnic_close - Disables a network interface entry point */ static int qlcnic_close(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); __qlcnic_down(adapter, netdev); return 0; } #define QLCNIC_VF_LB_BUCKET_SIZE 1 void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter) { void *head; int i; struct net_device *netdev = adapter->netdev; u32 filter_size = 0; u16 act_pci_func = 0; if (adapter->fhash.fmax && adapter->fhash.fhead) return; act_pci_func = adapter->ahw->total_nic_func; spin_lock_init(&adapter->mac_learn_lock); spin_lock_init(&adapter->rx_mac_learn_lock); if (qlcnic_sriov_vf_check(adapter)) { filter_size = QLCNIC_83XX_SRIOV_VF_MAX_MAC - 1; adapter->fhash.fbucket_size = QLCNIC_VF_LB_BUCKET_SIZE; } else if (qlcnic_82xx_check(adapter)) { filter_size = QLCNIC_LB_MAX_FILTERS; adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE; } else { filter_size = 
QLC_83XX_LB_MAX_FILTERS; adapter->fhash.fbucket_size = QLC_83XX_LB_BUCKET_SIZE; } head = kcalloc(adapter->fhash.fbucket_size, sizeof(struct hlist_head), GFP_ATOMIC); if (!head) return; adapter->fhash.fmax = (filter_size / act_pci_func); adapter->fhash.fhead = head; netdev_info(netdev, "active nic func = %d, mac filter size=%d\n", act_pci_func, adapter->fhash.fmax); for (i = 0; i < adapter->fhash.fbucket_size; i++) INIT_HLIST_HEAD(&adapter->fhash.fhead[i]); adapter->rx_fhash.fbucket_size = adapter->fhash.fbucket_size; head = kcalloc(adapter->rx_fhash.fbucket_size, sizeof(struct hlist_head), GFP_ATOMIC); if (!head) return; adapter->rx_fhash.fmax = (filter_size / act_pci_func); adapter->rx_fhash.fhead = head; for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) INIT_HLIST_HEAD(&adapter->rx_fhash.fhead[i]); } static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter) { if (adapter->fhash.fmax) kfree(adapter->fhash.fhead); adapter->fhash.fhead = NULL; adapter->fhash.fmax = 0; if (adapter->rx_fhash.fmax) kfree(adapter->rx_fhash.fhead); adapter->rx_fhash.fmax = 0; adapter->rx_fhash.fhead = NULL; } int qlcnic_check_temp(struct qlcnic_adapter *adapter) { struct net_device *netdev = adapter->netdev; u32 temp_state, temp_val, temp = 0; int rv = 0; if (qlcnic_83xx_check(adapter)) temp = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP); if (qlcnic_82xx_check(adapter)) temp = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP); temp_state = qlcnic_get_temp_state(temp); temp_val = qlcnic_get_temp_val(temp); if (temp_state == QLCNIC_TEMP_PANIC) { dev_err(&netdev->dev, "Device temperature %d degrees C exceeds" " maximum allowed. Hardware has been shut down.\n", temp_val); rv = 1; } else if (temp_state == QLCNIC_TEMP_WARN) { if (adapter->ahw->temp == QLCNIC_TEMP_NORMAL) { dev_err(&netdev->dev, "Device temperature %d degrees C " "exceeds operating range." 
" Immediate action needed.\n", temp_val); } } else { if (adapter->ahw->temp == QLCNIC_TEMP_WARN) { dev_info(&netdev->dev, "Device temperature is now %d degrees C" " in normal range.\n", temp_val); } } adapter->ahw->temp = temp_state; return rv; } static inline void dump_tx_ring_desc(struct qlcnic_host_tx_ring *tx_ring) { int i; for (i = 0; i < tx_ring->num_desc; i++) { pr_info("TX Desc: %d\n", i); print_hex_dump(KERN_INFO, "TX: ", DUMP_PREFIX_OFFSET, 16, 1, &tx_ring->desc_head[i], sizeof(struct cmd_desc_type0), true); } } static void qlcnic_dump_rings(struct qlcnic_adapter *adapter) { struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct net_device *netdev = adapter->netdev; struct qlcnic_host_rds_ring *rds_ring; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_host_tx_ring *tx_ring; int ring; if (!netdev || !netif_running(netdev)) return; for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; if (!rds_ring) continue; netdev_info(netdev, "rds_ring=%d crb_rcv_producer=%d producer=%u num_desc=%u\n", ring, readl(rds_ring->crb_rcv_producer), rds_ring->producer, rds_ring->num_desc); } for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &(recv_ctx->sds_rings[ring]); if (!sds_ring) continue; netdev_info(netdev, "sds_ring=%d crb_sts_consumer=%d consumer=%u crb_intr_mask=%d num_desc=%u\n", ring, readl(sds_ring->crb_sts_consumer), sds_ring->consumer, readl(sds_ring->crb_intr_mask), sds_ring->num_desc); } for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; if (!tx_ring) continue; netdev_info(netdev, "Tx ring=%d Context Id=0x%x\n", ring, tx_ring->ctx_id); netdev_info(netdev, "xmit_finished=%llu, xmit_called=%llu, xmit_on=%llu, xmit_off=%llu\n", tx_ring->tx_stats.xmit_finished, tx_ring->tx_stats.xmit_called, tx_ring->tx_stats.xmit_on, tx_ring->tx_stats.xmit_off); if (tx_ring->crb_intr_mask) netdev_info(netdev, "crb_intr_mask=%d\n", readl(tx_ring->crb_intr_mask)); netdev_info(netdev, "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n", readl(tx_ring->crb_cmd_producer), tx_ring->producer, tx_ring->sw_consumer, le32_to_cpu(*(tx_ring->hw_consumer))); netdev_info(netdev, "Total desc=%d, Available desc=%d\n", tx_ring->num_desc, qlcnic_tx_avail(tx_ring)); if (netif_msg_tx_err(adapter->ahw)) dump_tx_ring_desc(tx_ring); } } static void qlcnic_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct qlcnic_adapter *adapter = netdev_priv(netdev); if (test_bit(__QLCNIC_RESETTING, &adapter->state)) return; qlcnic_dump_rings(adapter); if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS || netif_msg_tx_err(adapter->ahw)) { netdev_err(netdev, "Tx timeout, reset the adapter.\n"); if (qlcnic_82xx_check(adapter)) adapter->need_fw_reset = 1; else if (qlcnic_83xx_check(adapter)) qlcnic_83xx_idc_request_reset(adapter, QLCNIC_FORCE_FW_DUMP_KEY); } else { netdev_err(netdev, "Tx timeout, reset adapter context.\n"); adapter->ahw->reset_context = 1; } } static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct net_device_stats *stats = &netdev->stats; if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) qlcnic_update_stats(adapter); stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; stats->tx_packets = adapter->stats.xmitfinished; stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; stats->tx_bytes = adapter->stats.txbytes; stats->rx_dropped = adapter->stats.rxdropped; stats->tx_dropped 
= adapter->stats.txdropped; return stats; } static irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *adapter) { u32 status; status = readl(adapter->isr_int_vec); if (!(status & adapter->ahw->int_vec_bit)) return IRQ_NONE; /* check interrupt state machine, to be sure */ status = readl(adapter->crb_int_state_reg); if (!ISR_LEGACY_INT_TRIGGERED(status)) return IRQ_NONE; writel(0xffffffff, adapter->tgt_status_reg); /* read twice to ensure write is flushed */ readl(adapter->isr_int_vec); readl(adapter->isr_int_vec); return IRQ_HANDLED; } static irqreturn_t qlcnic_tmp_intr(int irq, void *data) { struct qlcnic_host_sds_ring *sds_ring = data; struct qlcnic_adapter *adapter = sds_ring->adapter; if (adapter->flags & QLCNIC_MSIX_ENABLED) goto done; else if (adapter->flags & QLCNIC_MSI_ENABLED) { writel(0xffffffff, adapter->tgt_status_reg); goto done; } if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE) return IRQ_NONE; done: adapter->ahw->diag_cnt++; qlcnic_enable_sds_intr(adapter, sds_ring); return IRQ_HANDLED; } static irqreturn_t qlcnic_intr(int irq, void *data) { struct qlcnic_host_sds_ring *sds_ring = data; struct qlcnic_adapter *adapter = sds_ring->adapter; if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE) return IRQ_NONE; napi_schedule(&sds_ring->napi); return IRQ_HANDLED; } static irqreturn_t qlcnic_msi_intr(int irq, void *data) { struct qlcnic_host_sds_ring *sds_ring = data; struct qlcnic_adapter *adapter = sds_ring->adapter; /* clear interrupt */ writel(0xffffffff, adapter->tgt_status_reg); napi_schedule(&sds_ring->napi); return IRQ_HANDLED; } static irqreturn_t qlcnic_msix_intr(int irq, void *data) { struct qlcnic_host_sds_ring *sds_ring = data; napi_schedule(&sds_ring->napi); return IRQ_HANDLED; } static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data) { struct qlcnic_host_tx_ring *tx_ring = data; napi_schedule(&tx_ring->napi); return IRQ_HANDLED; } static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding) { u32 val; val = adapter->portnum & 0xf; val |= encoding << 7; val |= (jiffies - adapter->dev_rst_time) << 8; QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val); adapter->dev_rst_time = jiffies; } static int qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state) { u32 val; WARN_ON(state != QLCNIC_DEV_NEED_RESET && state != QLCNIC_DEV_NEED_QUISCENT); if (qlcnic_api_lock(adapter)) return -EIO; val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); if (state == QLCNIC_DEV_NEED_RESET) QLC_DEV_SET_RST_RDY(val, adapter->portnum); else if (state == QLCNIC_DEV_NEED_QUISCENT) QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum); QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val); qlcnic_api_unlock(adapter); return 0; } static int qlcnic_clr_drv_state(struct qlcnic_adapter *adapter) { u32 val; if (qlcnic_api_lock(adapter)) return -EBUSY; val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum); QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val); qlcnic_api_unlock(adapter); return 0; } void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed) { u32 val; if (qlcnic_api_lock(adapter)) goto err; val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE); QLC_DEV_CLR_REF_CNT(val, adapter->portnum); QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val); if (failed) { QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED); dev_info(&adapter->pdev->dev, "Device state set to Failed. 
Please Reboot\n"); } else if (!(val & 0x11111111)) QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD); val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum); QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val); qlcnic_api_unlock(adapter); err: adapter->fw_fail_cnt = 0; adapter->flags &= ~QLCNIC_FW_HANG; clear_bit(__QLCNIC_START_FW, &adapter->state); clear_bit(__QLCNIC_RESETTING, &adapter->state); } /* Grab api lock, before checking state */ static int qlcnic_check_drv_state(struct qlcnic_adapter *adapter) { int act, state, active_mask; struct qlcnic_hardware_context *ahw = adapter->ahw; state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); act = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE); if (adapter->flags & QLCNIC_FW_RESET_OWNER) { active_mask = (~(1 << (ahw->pci_func * 4))); act = act & active_mask; } if (((state & 0x11111111) == (act & 0x11111111)) || ((act & 0x11111111) == ((state >> 1) & 0x11111111))) return 0; else return 1; } static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter) { u32 val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_IDC_VER); if (val != QLCNIC_DRV_IDC_VER) { dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's" " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val); } return 0; } static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter) { u32 val, prev_state; u8 dev_init_timeo = adapter->dev_init_timeo; u8 portnum = adapter->portnum; u8 ret; if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state)) return 1; if (qlcnic_api_lock(adapter)) return -1; val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE); if (!(val & (1 << (portnum * 4)))) { QLC_DEV_SET_REF_CNT(val, portnum); QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val); } prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); QLCDB(adapter, HW, "Device state = %u\n", prev_state); switch (prev_state) { case QLCNIC_DEV_COLD: QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING); QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER); qlcnic_idc_debug_info(adapter, 0); qlcnic_api_unlock(adapter); return 1; case QLCNIC_DEV_READY: ret = qlcnic_check_idc_ver(adapter); qlcnic_api_unlock(adapter); return ret; case QLCNIC_DEV_NEED_RESET: val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); QLC_DEV_SET_RST_RDY(val, portnum); QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val); break; case QLCNIC_DEV_NEED_QUISCENT: val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); QLC_DEV_SET_QSCNT_RDY(val, portnum); QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val); break; case QLCNIC_DEV_FAILED: dev_err(&adapter->pdev->dev, "Device in failed state.\n"); qlcnic_api_unlock(adapter); return -1; case QLCNIC_DEV_INITIALIZING: case QLCNIC_DEV_QUISCENT: break; } qlcnic_api_unlock(adapter); do { msleep(1000); prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo); if (!dev_init_timeo) { dev_err(&adapter->pdev->dev, "Waiting for device to initialize timeout\n"); return -1; } if (qlcnic_api_lock(adapter)) return -1; val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); QLC_DEV_CLR_RST_QSCNT(val, portnum); QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val); ret = qlcnic_check_idc_ver(adapter); qlcnic_api_unlock(adapter); return ret; } static void qlcnic_fwinit_work(struct work_struct *work) { struct qlcnic_adapter *adapter = container_of(work, struct qlcnic_adapter, 
fw_work.work); u32 dev_state = 0xf; u32 val; if (qlcnic_api_lock(adapter)) goto err_ret; dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); if (dev_state == QLCNIC_DEV_QUISCENT || dev_state == QLCNIC_DEV_NEED_QUISCENT) { qlcnic_api_unlock(adapter); qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY * 2); return; } if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { qlcnic_api_unlock(adapter); goto wait_npar; } if (dev_state == QLCNIC_DEV_INITIALIZING || dev_state == QLCNIC_DEV_READY) { dev_info(&adapter->pdev->dev, "Detected state change from " "DEV_NEED_RESET, skipping ack check\n"); goto skip_ack_check; } if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) { dev_info(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n", adapter->reset_ack_timeo); goto skip_ack_check; } if (!qlcnic_check_drv_state(adapter)) { skip_ack_check: dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); if (dev_state == QLCNIC_DEV_NEED_RESET) { QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING); set_bit(__QLCNIC_START_FW, &adapter->state); QLCDB(adapter, DRV, "Restarting fw\n"); qlcnic_idc_debug_info(adapter, 0); val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); QLC_DEV_SET_RST_RDY(val, adapter->portnum); QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val); } qlcnic_api_unlock(adapter); rtnl_lock(); if (qlcnic_check_fw_dump_state(adapter) && (adapter->flags & QLCNIC_FW_RESET_OWNER)) { QLCDB(adapter, DRV, "Take FW dump\n"); qlcnic_dump_fw(adapter); adapter->flags |= QLCNIC_FW_HANG; } rtnl_unlock(); adapter->flags &= ~QLCNIC_FW_RESET_OWNER; if (!adapter->nic_ops->start_firmware(adapter)) { qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); adapter->fw_wait_cnt = 0; return; } goto err_ret; } qlcnic_api_unlock(adapter); wait_npar: dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state); switch (dev_state) { case QLCNIC_DEV_READY: if (!qlcnic_start_firmware(adapter)) { qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); adapter->fw_wait_cnt = 0; return; } break; case QLCNIC_DEV_FAILED: break; default: qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY); return; } err_ret: dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u " "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt); netif_device_attach(adapter->netdev); qlcnic_clr_all_drv_state(adapter, 0); } static void qlcnic_detach_work(struct work_struct *work) { struct qlcnic_adapter *adapter = container_of(work, struct qlcnic_adapter, fw_work.work); struct net_device *netdev = adapter->netdev; u32 status; netif_device_detach(netdev); /* Dont grab rtnl lock during Quiscent mode */ if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) { if (netif_running(netdev)) __qlcnic_down(adapter, netdev); } else qlcnic_down(adapter, netdev); status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1); if (status & QLCNIC_RCODE_FATAL_ERROR) { dev_err(&adapter->pdev->dev, "Detaching the device: peg halt status1=0x%x\n", status); if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) { dev_err(&adapter->pdev->dev, "On board active cooling fan failed. 
" "Device has been halted.\n"); dev_err(&adapter->pdev->dev, "Replace the adapter.\n"); } goto err_ret; } if (adapter->ahw->temp == QLCNIC_TEMP_PANIC) { dev_err(&adapter->pdev->dev, "Detaching the device: temp=%d\n", adapter->ahw->temp); goto err_ret; } /* Dont ack if this instance is the reset owner */ if (!(adapter->flags & QLCNIC_FW_RESET_OWNER)) { if (qlcnic_set_drv_state(adapter, adapter->dev_state)) { dev_err(&adapter->pdev->dev, "Failed to set driver state," "detaching the device.\n"); goto err_ret; } } adapter->fw_wait_cnt = 0; qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY); return; err_ret: netif_device_attach(netdev); qlcnic_clr_all_drv_state(adapter, 1); } /*Transit NPAR state to NON Operational */ static void qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter) { u32 state; state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); if (state == QLCNIC_DEV_NPAR_NON_OPER) return; if (qlcnic_api_lock(adapter)) return; QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER); qlcnic_api_unlock(adapter); } static void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key) { u32 state, xg_val = 0, gb_val = 0; qlcnic_xg_set_xg0_mask(xg_val); qlcnic_xg_set_xg1_mask(xg_val); QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, xg_val); qlcnic_gb_set_gb0_mask(gb_val); qlcnic_gb_set_gb1_mask(gb_val); qlcnic_gb_set_gb2_mask(gb_val); qlcnic_gb_set_gb3_mask(gb_val); QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, gb_val); dev_info(&adapter->pdev->dev, "Pause control frames disabled" " on all ports\n"); adapter->need_fw_reset = 1; if (qlcnic_api_lock(adapter)) return; state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) { netdev_err(adapter->netdev, "%s: Device is in non-operational state\n", __func__); qlcnic_api_unlock(adapter); return; } if (state == QLCNIC_DEV_READY) { QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET); adapter->flags |= QLCNIC_FW_RESET_OWNER; QLCDB(adapter, DRV, "NEED_RESET state set\n"); qlcnic_idc_debug_info(adapter, 0); } QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER); qlcnic_api_unlock(adapter); } /* Transit to NPAR READY state from NPAR NOT READY state */ static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter) { if (qlcnic_api_lock(adapter)) return; QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER); QLCDB(adapter, DRV, "NPAR operational state set\n"); qlcnic_api_unlock(adapter); } void qlcnic_schedule_work(struct qlcnic_adapter *adapter, work_func_t func, int delay) { if (test_bit(__QLCNIC_AER, &adapter->state)) return; INIT_DELAYED_WORK(&adapter->fw_work, func); queue_delayed_work(adapter->qlcnic_wq, &adapter->fw_work, round_jiffies_relative(delay)); } static void qlcnic_attach_work(struct work_struct *work) { struct qlcnic_adapter *adapter = container_of(work, struct qlcnic_adapter, fw_work.work); struct net_device *netdev = adapter->netdev; u32 npar_state; if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) { npar_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO) qlcnic_clr_all_drv_state(adapter, 0); else if (npar_state != QLCNIC_DEV_NPAR_OPER) qlcnic_schedule_work(adapter, qlcnic_attach_work, FW_POLL_DELAY); else goto attach; QLCDB(adapter, DRV, "Waiting for NPAR state to operational\n"); return; } attach: qlcnic_dcb_get_info(adapter->dcb); if (netif_running(netdev)) { if 
(qlcnic_up(adapter, netdev)) goto done; qlcnic_restore_indev_addr(netdev, NETDEV_UP); } done: netif_device_attach(netdev); adapter->fw_fail_cnt = 0; adapter->flags &= ~QLCNIC_FW_HANG; clear_bit(__QLCNIC_RESETTING, &adapter->state); if (adapter->portnum == 0) qlcnic_set_drv_version(adapter); if (!qlcnic_clr_drv_state(adapter)) qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); } static int qlcnic_check_health(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump; u32 state = 0, heartbeat; u32 peg_status; int err = 0; if (qlcnic_check_temp(adapter)) goto detach; if (adapter->need_fw_reset) qlcnic_dev_request_reset(adapter, 0); state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); if (state == QLCNIC_DEV_NEED_RESET) { qlcnic_set_npar_non_operational(adapter); adapter->need_fw_reset = 1; } else if (state == QLCNIC_DEV_NEED_QUISCENT) goto detach; heartbeat = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); if (heartbeat != adapter->heartbeat) { adapter->heartbeat = heartbeat; adapter->fw_fail_cnt = 0; if (adapter->need_fw_reset) goto detach; if (ahw->reset_context && qlcnic_auto_fw_reset) qlcnic_reset_hw_context(adapter); return 0; } if (++adapter->fw_fail_cnt < FW_FAIL_THRESH) return 0; adapter->flags |= QLCNIC_FW_HANG; qlcnic_dev_request_reset(adapter, 0); if (qlcnic_auto_fw_reset) clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state); dev_err(&adapter->pdev->dev, "firmware hang detected\n"); peg_status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1); dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n" "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n" "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n" "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" "PEG_NET_4_PC: 0x%x\n", peg_status, QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS2), QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c, &err), QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c, &err), QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c, &err), QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c, &err), QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, &err)); if (QLCNIC_FWERROR_CODE(peg_status) == 0x67) dev_err(&adapter->pdev->dev, "Firmware aborted with error code 0x00006700. " "Device is being reset.\n"); detach: adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? 
state : QLCNIC_DEV_NEED_RESET; if (qlcnic_auto_fw_reset && !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) { qlcnic_schedule_work(adapter, qlcnic_detach_work, 0); QLCDB(adapter, DRV, "fw recovery scheduled.\n"); } else if (!qlcnic_auto_fw_reset && fw_dump->enable && adapter->flags & QLCNIC_FW_RESET_OWNER) { qlcnic_dump_fw(adapter); } return 1; } void qlcnic_fw_poll_work(struct work_struct *work) { struct qlcnic_adapter *adapter = container_of(work, struct qlcnic_adapter, fw_work.work); if (test_bit(__QLCNIC_RESETTING, &adapter->state)) goto reschedule; if (qlcnic_check_health(adapter)) return; if (adapter->fhash.fnum) qlcnic_prune_lb_filters(adapter); reschedule: qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); } static int qlcnic_is_first_func(struct pci_dev *pdev) { struct pci_dev *oth_pdev; int val = pdev->devfn; while (val-- > 0) { oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr (pdev->bus), pdev->bus->number, PCI_DEVFN(PCI_SLOT(pdev->devfn), val)); if (!oth_pdev) continue; if (oth_pdev->current_state != PCI_D3cold) { pci_dev_put(oth_pdev); return 0; } pci_dev_put(oth_pdev); } return 1; } static int qlcnic_attach_func(struct pci_dev *pdev) { int err, first_func; struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; pdev->error_state = pci_channel_io_normal; err = pci_enable_device(pdev); if (err) return err; pci_set_master(pdev); pci_restore_state(pdev); first_func = qlcnic_is_first_func(pdev); if (qlcnic_api_lock(adapter)) return -EINVAL; if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) { adapter->need_fw_reset = 1; set_bit(__QLCNIC_START_FW, &adapter->state); QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING); QLCDB(adapter, DRV, "Restarting fw\n"); } qlcnic_api_unlock(adapter); err = qlcnic_start_firmware(adapter); if (err) return err; qlcnic_clr_drv_state(adapter); kfree(adapter->msix_entries); adapter->msix_entries = NULL; err = qlcnic_setup_intr(adapter); if (err) { kfree(adapter->msix_entries); netdev_err(netdev, "failed to setup interrupt\n"); return err; } if (netif_running(netdev)) { err = qlcnic_attach(adapter); if (err) { qlcnic_clr_all_drv_state(adapter, 1); clear_bit(__QLCNIC_AER, &adapter->state); netif_device_attach(netdev); return err; } err = qlcnic_up(adapter, netdev); if (err) goto done; qlcnic_restore_indev_addr(netdev, NETDEV_UP); } done: netif_device_attach(netdev); return err; } static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; if (state == pci_channel_io_normal) return PCI_ERS_RESULT_RECOVERED; set_bit(__QLCNIC_AER, &adapter->state); netif_device_detach(netdev); cancel_delayed_work_sync(&adapter->fw_work); if (netif_running(netdev)) qlcnic_down(adapter, netdev); qlcnic_detach(adapter); qlcnic_teardown_intr(adapter); clear_bit(__QLCNIC_RESETTING, &adapter->state); pci_save_state(pdev); pci_disable_device(pdev); return PCI_ERS_RESULT_NEED_RESET; } static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev) { pci_ers_result_t res; rtnl_lock(); res = qlcnic_attach_func(pdev) ? 
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; rtnl_unlock(); return res; } static void qlcnic_82xx_io_resume(struct pci_dev *pdev) { u32 state; struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER, &adapter->state)) qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); } static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops; if (hw_ops->io_error_detected) { return hw_ops->io_error_detected(pdev, state); } else { dev_err(&pdev->dev, "AER error_detected handler not registered.\n"); return PCI_ERS_RESULT_DISCONNECT; } } static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev) { struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops; if (hw_ops->io_slot_reset) { return hw_ops->io_slot_reset(pdev); } else { dev_err(&pdev->dev, "AER slot_reset handler not registered.\n"); return PCI_ERS_RESULT_DISCONNECT; } } static void qlcnic_io_resume(struct pci_dev *pdev) { struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops; if (hw_ops->io_resume) hw_ops->io_resume(pdev); else dev_err(&pdev->dev, "AER resume handler not registered.\n"); } static int qlcnicvf_start_firmware(struct qlcnic_adapter *adapter) { int err; err = qlcnic_can_start_firmware(adapter); if (err) return err; err = qlcnic_check_npar_opertional(adapter); if (err) return err; err = qlcnic_initialize_nic(adapter); if (err) return err; qlcnic_check_options(adapter); err = qlcnic_set_eswitch_port_config(adapter); if (err) return err; adapter->need_fw_reset = 0; return err; } int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt, int queue_type) { struct net_device *netdev = adapter->netdev; char buf[8]; if (queue_type == QLCNIC_RX_QUEUE) strcpy(buf, "SDS"); else strcpy(buf, "Tx"); if (!is_power_of_2(ring_cnt)) { netdev_err(netdev, "%s rings value should be a power of 2\n", buf); return -EINVAL; } if (qlcnic_82xx_check(adapter) && (queue_type == QLCNIC_TX_QUEUE) && !qlcnic_check_multi_tx(adapter)) { netdev_err(netdev, "No Multi Tx queue support\n"); return -EINVAL; } if (ring_cnt > num_online_cpus()) { netdev_err(netdev, "%s value[%u] should not be higher than, number of online CPUs\n", buf, num_online_cpus()); return -EINVAL; } return 0; } int qlcnic_setup_rings(struct qlcnic_adapter *adapter) { struct net_device *netdev = adapter->netdev; u8 tx_rings, rx_rings; int err; if (test_bit(__QLCNIC_RESETTING, &adapter->state)) return -EBUSY; tx_rings = adapter->drv_tss_rings; rx_rings = adapter->drv_rss_rings; netif_device_detach(netdev); err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings); if (err) goto done; if (netif_running(netdev)) __qlcnic_down(adapter, netdev); qlcnic_detach(adapter); if (qlcnic_83xx_check(adapter)) { qlcnic_83xx_free_mbx_intr(adapter); qlcnic_83xx_enable_mbx_poll(adapter); } qlcnic_teardown_intr(adapter); err = qlcnic_setup_intr(adapter); if (err) { kfree(adapter->msix_entries); netdev_err(netdev, "failed to setup interrupt\n"); return err; } /* Check if we need to update real_num_{tx|rx}_queues because * qlcnic_setup_intr() may change Tx/Rx rings size */ if ((tx_rings != adapter->drv_tx_rings) || (rx_rings != adapter->drv_sds_rings)) { err = 
qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings, adapter->drv_sds_rings); if (err) goto done; } if (qlcnic_83xx_check(adapter)) { qlcnic_83xx_initialize_nic(adapter, 1); err = qlcnic_83xx_setup_mbx_intr(adapter); qlcnic_83xx_disable_mbx_poll(adapter); if (err) { dev_err(&adapter->pdev->dev, "failed to setup mbx interrupt\n"); goto done; } } if (netif_running(netdev)) { err = qlcnic_attach(adapter); if (err) goto done; err = __qlcnic_up(adapter, netdev); if (err) goto done; qlcnic_restore_indev_addr(netdev, NETDEV_UP); } done: netif_device_attach(netdev); clear_bit(__QLCNIC_RESETTING, &adapter->state); return err; } #ifdef CONFIG_INET #define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops) static void qlcnic_config_indev_addr(struct qlcnic_adapter *adapter, struct net_device *dev, unsigned long event) { const struct in_ifaddr *ifa; struct in_device *indev; indev = in_dev_get(dev); if (!indev) return; in_dev_for_each_ifa_rtnl(ifa, indev) { switch (event) { case NETDEV_UP: qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP); break; case NETDEV_DOWN: qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN); break; default: break; } } in_dev_put(indev); } void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct net_device *dev; u16 vid; qlcnic_config_indev_addr(adapter, netdev, event); rcu_read_lock(); for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) { dev = __vlan_find_dev_deep_rcu(netdev, htons(ETH_P_8021Q), vid); if (!dev) continue; qlcnic_config_indev_addr(adapter, dev, event); } rcu_read_unlock(); } static int qlcnic_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct qlcnic_adapter *adapter; struct net_device *dev = netdev_notifier_info_to_dev(ptr); recheck: if (dev == NULL) goto done; if (is_vlan_dev(dev)) { dev = vlan_dev_real_dev(dev); goto recheck; } if (!is_qlcnic_netdev(dev)) goto done; adapter = netdev_priv(dev); if (!adapter) goto done; if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) goto done; qlcnic_config_indev_addr(adapter, dev, event); done: return NOTIFY_DONE; } static int qlcnic_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) { struct qlcnic_adapter *adapter; struct net_device *dev; struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; dev = ifa->ifa_dev ? 
ifa->ifa_dev->dev : NULL; recheck: if (dev == NULL) goto done; if (is_vlan_dev(dev)) { dev = vlan_dev_real_dev(dev); goto recheck; } if (!is_qlcnic_netdev(dev)) goto done; adapter = netdev_priv(dev); if (!adapter) goto done; if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) goto done; switch (event) { case NETDEV_UP: qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP); break; case NETDEV_DOWN: qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN); break; default: break; } done: return NOTIFY_DONE; } static struct notifier_block qlcnic_netdev_cb = { .notifier_call = qlcnic_netdev_event, }; static struct notifier_block qlcnic_inetaddr_cb = { .notifier_call = qlcnic_inetaddr_event, }; #else void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event) { } #endif static const struct pci_error_handlers qlcnic_err_handler = { .error_detected = qlcnic_io_error_detected, .slot_reset = qlcnic_io_slot_reset, .resume = qlcnic_io_resume, }; static SIMPLE_DEV_PM_OPS(qlcnic_pm_ops, qlcnic_suspend, qlcnic_resume); static struct pci_driver qlcnic_driver = { .name = qlcnic_driver_name, .id_table = qlcnic_pci_tbl, .probe = qlcnic_probe, .remove = qlcnic_remove, .driver.pm = &qlcnic_pm_ops, .shutdown = qlcnic_shutdown, .err_handler = &qlcnic_err_handler, #ifdef CONFIG_QLCNIC_SRIOV .sriov_configure = qlcnic_pci_sriov_configure, #endif }; static int __init qlcnic_init_module(void) { int ret; printk(KERN_INFO "%s\n", qlcnic_driver_string); #ifdef CONFIG_INET register_netdevice_notifier(&qlcnic_netdev_cb); register_inetaddr_notifier(&qlcnic_inetaddr_cb); #endif ret = pci_register_driver(&qlcnic_driver); if (ret) { #ifdef CONFIG_INET unregister_inetaddr_notifier(&qlcnic_inetaddr_cb); unregister_netdevice_notifier(&qlcnic_netdev_cb); #endif } return ret; } module_init(qlcnic_init_module); static void __exit qlcnic_exit_module(void) { pci_unregister_driver(&qlcnic_driver); #ifdef CONFIG_INET unregister_inetaddr_notifier(&qlcnic_inetaddr_cb); unregister_netdevice_notifier(&qlcnic_netdev_cb); #endif } module_exit(qlcnic_exit_module);
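/*
 * Editor's note (illustrative, not upstream code): module bring-up and
 * teardown ordering, as implemented above.  qlcnic_init_module() registers
 * the netdevice and inetaddr notifiers before pci_register_driver(),
 * presumably so VLAN/IP address events seen during probe are not missed,
 * and it unwinds both notifiers again if driver registration fails;
 * qlcnic_exit_module() reverses the order (unregister the PCI driver
 * first, then the notifiers).  Runtime behaviour such as MSI-X usage,
 * firmware-file loading and automatic firmware reset is steered by the
 * qlcnic_use_msi_x, qlcnic_load_fw_file and qlcnic_auto_fw_reset variables
 * referenced earlier in this file; their user-visible module parameter
 * names can be listed with "modinfo qlcnic".
 */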
linux-master
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic qlcnic NIC Driver * Copyright (c) 2009-2013 QLogic Corporation */ #include <net/ip.h> #include "qlcnic.h" #include "qlcnic_hdr.h" #include "qlcnic_83xx_hw.h" #include "qlcnic_hw.h" #define QLC_83XX_MINIDUMP_FLASH 0x520000 #define QLC_83XX_OCM_INDEX 3 #define QLC_83XX_PCI_INDEX 0 #define QLC_83XX_DMA_ENGINE_INDEX 8 static const u32 qlcnic_ms_read_data[] = { 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC }; #define QLCNIC_DUMP_WCRB BIT_0 #define QLCNIC_DUMP_RWCRB BIT_1 #define QLCNIC_DUMP_ANDCRB BIT_2 #define QLCNIC_DUMP_ORCRB BIT_3 #define QLCNIC_DUMP_POLLCRB BIT_4 #define QLCNIC_DUMP_RD_SAVE BIT_5 #define QLCNIC_DUMP_WRT_SAVED BIT_6 #define QLCNIC_DUMP_MOD_SAVE_ST BIT_7 #define QLCNIC_DUMP_SKIP BIT_7 #define QLCNIC_DUMP_MASK_MAX 0xff struct qlcnic_pex_dma_descriptor { u32 read_data_size; u32 dma_desc_cmd; u32 src_addr_low; u32 src_addr_high; u32 dma_bus_addr_low; u32 dma_bus_addr_high; u32 rsvd[6]; } __packed; struct qlcnic_common_entry_hdr { u32 type; u32 offset; u32 cap_size; #if defined(__LITTLE_ENDIAN) u8 mask; u8 rsvd[2]; u8 flags; #else u8 flags; u8 rsvd[2]; u8 mask; #endif } __packed; struct __crb { u32 addr; #if defined(__LITTLE_ENDIAN) u8 stride; u8 rsvd1[3]; #else u8 rsvd1[3]; u8 stride; #endif u32 data_size; u32 no_ops; u32 rsvd2[4]; } __packed; struct __ctrl { u32 addr; #if defined(__LITTLE_ENDIAN) u8 stride; u8 index_a; u16 timeout; #else u16 timeout; u8 index_a; u8 stride; #endif u32 data_size; u32 no_ops; #if defined(__LITTLE_ENDIAN) u8 opcode; u8 index_v; u8 shl_val; u8 shr_val; #else u8 shr_val; u8 shl_val; u8 index_v; u8 opcode; #endif u32 val1; u32 val2; u32 val3; } __packed; struct __cache { u32 addr; #if defined(__LITTLE_ENDIAN) u16 stride; u16 init_tag_val; #else u16 init_tag_val; u16 stride; #endif u32 size; u32 no_ops; u32 ctrl_addr; u32 ctrl_val; u32 read_addr; #if defined(__LITTLE_ENDIAN) u8 read_addr_stride; u8 read_addr_num; u8 rsvd1[2]; #else u8 rsvd1[2]; u8 read_addr_num; u8 read_addr_stride; #endif } __packed; struct __ocm { u8 rsvd[8]; u32 size; u32 no_ops; u8 rsvd1[8]; u32 read_addr; u32 read_addr_stride; } __packed; struct __mem { u32 desc_card_addr; u32 dma_desc_cmd; u32 start_dma_cmd; u32 rsvd[3]; u32 addr; u32 size; } __packed; struct __mux { u32 addr; u8 rsvd[4]; u32 size; u32 no_ops; u32 val; u32 val_stride; u32 read_addr; u8 rsvd2[4]; } __packed; struct __queue { u32 sel_addr; #if defined(__LITTLE_ENDIAN) u16 stride; u8 rsvd[2]; #else u8 rsvd[2]; u16 stride; #endif u32 size; u32 no_ops; u8 rsvd2[8]; u32 read_addr; #if defined(__LITTLE_ENDIAN) u8 read_addr_stride; u8 read_addr_cnt; u8 rsvd3[2]; #else u8 rsvd3[2]; u8 read_addr_cnt; u8 read_addr_stride; #endif } __packed; struct __pollrd { u32 sel_addr; u32 read_addr; u32 sel_val; #if defined(__LITTLE_ENDIAN) u16 sel_val_stride; u16 no_ops; #else u16 no_ops; u16 sel_val_stride; #endif u32 poll_wait; u32 poll_mask; u32 data_size; u8 rsvd[4]; } __packed; struct __mux2 { u32 sel_addr1; u32 sel_addr2; u32 sel_val1; u32 sel_val2; u32 no_ops; u32 sel_val_mask; u32 read_addr; #if defined(__LITTLE_ENDIAN) u8 sel_val_stride; u8 data_size; u8 rsvd[2]; #else u8 rsvd[2]; u8 data_size; u8 sel_val_stride; #endif } __packed; struct __pollrdmwr { u32 addr1; u32 addr2; u32 val1; u32 val2; u32 poll_wait; u32 poll_mask; u32 mod_mask; u32 data_size; } __packed; struct qlcnic_dump_entry { struct qlcnic_common_entry_hdr hdr; union { struct __crb crb; struct __cache cache; struct __ocm ocm; struct __mem mem; struct __mux mux; struct __queue que; struct __ctrl ctrl; struct 
__pollrdmwr pollrdmwr; struct __mux2 mux2; struct __pollrd pollrd; } region; } __packed; enum qlcnic_minidump_opcode { QLCNIC_DUMP_NOP = 0, QLCNIC_DUMP_READ_CRB = 1, QLCNIC_DUMP_READ_MUX = 2, QLCNIC_DUMP_QUEUE = 3, QLCNIC_DUMP_BRD_CONFIG = 4, QLCNIC_DUMP_READ_OCM = 6, QLCNIC_DUMP_PEG_REG = 7, QLCNIC_DUMP_L1_DTAG = 8, QLCNIC_DUMP_L1_ITAG = 9, QLCNIC_DUMP_L1_DATA = 11, QLCNIC_DUMP_L1_INST = 12, QLCNIC_DUMP_L2_DTAG = 21, QLCNIC_DUMP_L2_ITAG = 22, QLCNIC_DUMP_L2_DATA = 23, QLCNIC_DUMP_L2_INST = 24, QLCNIC_DUMP_POLL_RD = 35, QLCNIC_READ_MUX2 = 36, QLCNIC_READ_POLLRDMWR = 37, QLCNIC_DUMP_READ_ROM = 71, QLCNIC_DUMP_READ_MEM = 72, QLCNIC_DUMP_READ_CTRL = 98, QLCNIC_DUMP_TLHDR = 99, QLCNIC_DUMP_RDEND = 255 }; inline u32 qlcnic_82xx_get_saved_state(void *t_hdr, u32 index) { struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr; return hdr->saved_state[index]; } inline void qlcnic_82xx_set_saved_state(void *t_hdr, u32 index, u32 value) { struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr; hdr->saved_state[index] = value; } void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump) { struct qlcnic_82xx_dump_template_hdr *hdr; hdr = fw_dump->tmpl_hdr; fw_dump->tmpl_hdr_size = hdr->size; fw_dump->version = hdr->version; fw_dump->num_entries = hdr->num_entries; fw_dump->offset = hdr->offset; hdr->drv_cap_mask = hdr->cap_mask; fw_dump->cap_mask = hdr->cap_mask; fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? true : false; } inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index) { struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr; return hdr->cap_sizes[index]; } void qlcnic_82xx_set_sys_info(void *t_hdr, int idx, u32 value) { struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr; hdr->sys_info[idx] = value; } void qlcnic_82xx_store_cap_mask(void *tmpl_hdr, u32 mask) { struct qlcnic_82xx_dump_template_hdr *hdr = tmpl_hdr; hdr->drv_cap_mask = mask; } inline u32 qlcnic_83xx_get_saved_state(void *t_hdr, u32 index) { struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr; return hdr->saved_state[index]; } inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index, u32 value) { struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr; hdr->saved_state[index] = value; } #define QLCNIC_TEMPLATE_VERSION (0x20001) void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump) { struct qlcnic_83xx_dump_template_hdr *hdr; hdr = fw_dump->tmpl_hdr; fw_dump->tmpl_hdr_size = hdr->size; fw_dump->version = hdr->version; fw_dump->num_entries = hdr->num_entries; fw_dump->offset = hdr->offset; hdr->drv_cap_mask = hdr->cap_mask; fw_dump->cap_mask = hdr->cap_mask; fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >= QLCNIC_TEMPLATE_VERSION; } inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index) { struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr; return hdr->cap_sizes[index]; } void qlcnic_83xx_set_sys_info(void *t_hdr, int idx, u32 value) { struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr; hdr->sys_info[idx] = value; } void qlcnic_83xx_store_cap_mask(void *tmpl_hdr, u32 mask) { struct qlcnic_83xx_dump_template_hdr *hdr; hdr = tmpl_hdr; hdr->drv_cap_mask = mask; } struct qlcnic_dump_operations { enum qlcnic_minidump_opcode opcode; u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *, __le32 *); }; static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, __le32 *buffer) { int i; u32 addr, data; struct __crb *crb = &entry->region.crb; addr = crb->addr; for (i = 0; i < crb->no_ops; i++) { data = qlcnic_ind_rd(adapter, addr); *buffer++ = cpu_to_le32(addr); 
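/* The value read from the CRB register follows its address in the capture buffer, so each op consumes two dwords */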
*buffer++ = cpu_to_le32(data); addr += crb->stride; } return crb->no_ops * 2 * sizeof(u32); } static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, __le32 *buffer) { void *hdr = adapter->ahw->fw_dump.tmpl_hdr; struct __ctrl *ctr = &entry->region.ctrl; int i, k, timeout = 0; u32 addr, data, temp; u8 no_ops; addr = ctr->addr; no_ops = ctr->no_ops; for (i = 0; i < no_ops; i++) { k = 0; for (k = 0; k < 8; k++) { if (!(ctr->opcode & (1 << k))) continue; switch (1 << k) { case QLCNIC_DUMP_WCRB: qlcnic_ind_wr(adapter, addr, ctr->val1); break; case QLCNIC_DUMP_RWCRB: data = qlcnic_ind_rd(adapter, addr); qlcnic_ind_wr(adapter, addr, data); break; case QLCNIC_DUMP_ANDCRB: data = qlcnic_ind_rd(adapter, addr); qlcnic_ind_wr(adapter, addr, (data & ctr->val2)); break; case QLCNIC_DUMP_ORCRB: data = qlcnic_ind_rd(adapter, addr); qlcnic_ind_wr(adapter, addr, (data | ctr->val3)); break; case QLCNIC_DUMP_POLLCRB: while (timeout <= ctr->timeout) { data = qlcnic_ind_rd(adapter, addr); if ((data & ctr->val2) == ctr->val1) break; usleep_range(1000, 2000); timeout++; } if (timeout > ctr->timeout) { dev_info(&adapter->pdev->dev, "Timed out, aborting poll CRB\n"); return -EINVAL; } break; case QLCNIC_DUMP_RD_SAVE: temp = ctr->index_a; if (temp) addr = qlcnic_get_saved_state(adapter, hdr, temp); data = qlcnic_ind_rd(adapter, addr); qlcnic_set_saved_state(adapter, hdr, ctr->index_v, data); break; case QLCNIC_DUMP_WRT_SAVED: temp = ctr->index_v; if (temp) data = qlcnic_get_saved_state(adapter, hdr, temp); else data = ctr->val1; temp = ctr->index_a; if (temp) addr = qlcnic_get_saved_state(adapter, hdr, temp); qlcnic_ind_wr(adapter, addr, data); break; case QLCNIC_DUMP_MOD_SAVE_ST: data = qlcnic_get_saved_state(adapter, hdr, ctr->index_v); data <<= ctr->shl_val; data >>= ctr->shr_val; if (ctr->val2) data &= ctr->val2; data |= ctr->val3; data += ctr->val1; qlcnic_set_saved_state(adapter, hdr, ctr->index_v, data); break; default: dev_info(&adapter->pdev->dev, "Unknown opcode\n"); break; } } addr += ctr->stride; } return 0; } static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, __le32 *buffer) { int loop; u32 val, data = 0; struct __mux *mux = &entry->region.mux; val = mux->val; for (loop = 0; loop < mux->no_ops; loop++) { qlcnic_ind_wr(adapter, mux->addr, val); data = qlcnic_ind_rd(adapter, mux->read_addr); *buffer++ = cpu_to_le32(val); *buffer++ = cpu_to_le32(data); val += mux->val_stride; } return 2 * mux->no_ops * sizeof(u32); } static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, __le32 *buffer) { int i, loop; u32 cnt, addr, data, que_id = 0; struct __queue *que = &entry->region.que; addr = que->read_addr; cnt = que->read_addr_cnt; for (loop = 0; loop < que->no_ops; loop++) { qlcnic_ind_wr(adapter, que->sel_addr, que_id); addr = que->read_addr; for (i = 0; i < cnt; i++) { data = qlcnic_ind_rd(adapter, addr); *buffer++ = cpu_to_le32(data); addr += que->read_addr_stride; } que_id += que->stride; } return que->no_ops * cnt * sizeof(u32); } static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, __le32 *buffer) { int i; u32 data; void __iomem *addr; struct __ocm *ocm = &entry->region.ocm; addr = adapter->ahw->pci_base0 + ocm->read_addr; for (i = 0; i < ocm->no_ops; i++) { data = readl(addr); *buffer++ = cpu_to_le32(data); addr += ocm->read_addr_stride; } return ocm->no_ops * sizeof(u32); } static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter, struct 
qlcnic_dump_entry *entry, __le32 *buffer) { int i, count = 0; u32 fl_addr, size, val, lck_val, addr; struct __mem *rom = &entry->region.mem; fl_addr = rom->addr; size = rom->size / 4; lock_try: lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK); if (!lck_val && count < MAX_CTL_CHECK) { usleep_range(10000, 11000); count++; goto lock_try; } QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, adapter->ahw->pci_func); for (i = 0; i < size; i++) { addr = fl_addr & 0xFFFF0000; qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr); addr = LSW(fl_addr) + FLASH_ROM_DATA; val = qlcnic_ind_rd(adapter, addr); fl_addr += 4; *buffer++ = cpu_to_le32(val); } QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK); return rom->size; } static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, __le32 *buffer) { int i; u32 cnt, val, data, addr; struct __cache *l1 = &entry->region.cache; val = l1->init_tag_val; for (i = 0; i < l1->no_ops; i++) { qlcnic_ind_wr(adapter, l1->addr, val); qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val)); addr = l1->read_addr; cnt = l1->read_addr_num; while (cnt) { data = qlcnic_ind_rd(adapter, addr); *buffer++ = cpu_to_le32(data); addr += l1->read_addr_stride; cnt--; } val += l1->stride; } return l1->no_ops * l1->read_addr_num * sizeof(u32); } static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, __le32 *buffer) { int i; u32 cnt, val, data, addr; u8 poll_mask, poll_to, time_out = 0; struct __cache *l2 = &entry->region.cache; val = l2->init_tag_val; poll_mask = LSB(MSW(l2->ctrl_val)); poll_to = MSB(MSW(l2->ctrl_val)); for (i = 0; i < l2->no_ops; i++) { qlcnic_ind_wr(adapter, l2->addr, val); if (LSW(l2->ctrl_val)) qlcnic_ind_wr(adapter, l2->ctrl_addr, LSW(l2->ctrl_val)); if (!poll_mask) goto skip_poll; do { data = qlcnic_ind_rd(adapter, l2->ctrl_addr); if (!(data & poll_mask)) break; usleep_range(1000, 2000); time_out++; } while (time_out <= poll_to); if (time_out > poll_to) { dev_err(&adapter->pdev->dev, "Timeout exceeded in %s, aborting dump\n", __func__); return -EINVAL; } skip_poll: addr = l2->read_addr; cnt = l2->read_addr_num; while (cnt) { data = qlcnic_ind_rd(adapter, addr); *buffer++ = cpu_to_le32(data); addr += l2->read_addr_stride; cnt--; } val += l2->stride; } return l2->no_ops * l2->read_addr_num * sizeof(u32); } static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter, struct __mem *mem, __le32 *buffer, int *ret) { u32 addr, data, test; int i, reg_read; reg_read = mem->size; addr = mem->addr; /* check for data size of multiple of 16 and 16 byte alignment */ if ((addr & 0xf) || (reg_read%16)) { dev_info(&adapter->pdev->dev, "Unaligned memory addr:0x%x size:0x%x\n", addr, reg_read); *ret = -EINVAL; return 0; } mutex_lock(&adapter->ahw->mem_lock); while (reg_read != 0) { qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr); qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0); qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE); for (i = 0; i < MAX_CTL_CHECK; i++) { test = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL); if (!(test & TA_CTL_BUSY)) break; } if (i == MAX_CTL_CHECK) { if (printk_ratelimit()) { dev_err(&adapter->pdev->dev, "failed to read through agent\n"); *ret = -EIO; goto out; } } for (i = 0; i < 4; i++) { data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]); *buffer++ = cpu_to_le32(data); } addr += 16; reg_read -= 16; ret += 16; cond_resched(); } out: mutex_unlock(&adapter->ahw->mem_lock); return mem->size; } /* DMA register base address */ #define 
QLC_DMA_REG_BASE_ADDR(dma_no) (0x77320000 + (dma_no * 0x10000)) /* DMA register offsets w.r.t base address */ #define QLC_DMA_CMD_BUFF_ADDR_LOW 0 #define QLC_DMA_CMD_BUFF_ADDR_HI 4 #define QLC_DMA_CMD_STATUS_CTRL 8 static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter, struct __mem *mem) { struct device *dev = &adapter->pdev->dev; u32 dma_no, dma_base_addr, temp_addr; int i, ret, dma_sts; void *tmpl_hdr; tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr; dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr, QLC_83XX_DMA_ENGINE_INDEX); dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no); temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW; ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr); if (ret) return ret; temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI; ret = qlcnic_ind_wr(adapter, temp_addr, 0); if (ret) return ret; temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL; ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd); if (ret) return ret; /* Wait for DMA to complete */ temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL; for (i = 0; i < 400; i++) { dma_sts = qlcnic_ind_rd(adapter, temp_addr); if (dma_sts & BIT_1) usleep_range(250, 500); else break; } if (i >= 400) { dev_info(dev, "PEX DMA operation timed out"); ret = -EIO; } return ret; } static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter, struct __mem *mem, __le32 *buffer, int *ret) { struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; u32 temp, dma_base_addr, size = 0, read_size = 0; struct qlcnic_pex_dma_descriptor *dma_descr; struct device *dev = &adapter->pdev->dev; dma_addr_t dma_phys_addr; void *dma_buffer; void *tmpl_hdr; tmpl_hdr = fw_dump->tmpl_hdr; /* Check if DMA engine is available */ temp = qlcnic_get_saved_state(adapter, tmpl_hdr, QLC_83XX_DMA_ENGINE_INDEX); dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp); temp = qlcnic_ind_rd(adapter, dma_base_addr + QLC_DMA_CMD_STATUS_CTRL); if (!(temp & BIT_31)) { dev_info(dev, "%s: DMA engine is not available\n", __func__); *ret = -EIO; return 0; } /* Create DMA descriptor */ dma_descr = kzalloc(sizeof(struct qlcnic_pex_dma_descriptor), GFP_KERNEL); if (!dma_descr) { *ret = -ENOMEM; return 0; } /* dma_desc_cmd 0:15 = 0 * dma_desc_cmd 16:19 = mem->dma_desc_cmd 0:3 * dma_desc_cmd 20:23 = pci function number * dma_desc_cmd 24:31 = mem->dma_desc_cmd 8:15 */ dma_phys_addr = fw_dump->phys_addr; dma_buffer = fw_dump->dma_buffer; temp = 0; temp = mem->dma_desc_cmd & 0xff0f; temp |= (adapter->ahw->pci_func & 0xf) << 4; dma_descr->dma_desc_cmd = (temp << 16) & 0xffff0000; dma_descr->dma_bus_addr_low = LSD(dma_phys_addr); dma_descr->dma_bus_addr_high = MSD(dma_phys_addr); dma_descr->src_addr_high = 0; /* Collect memory dump using multiple DMA operations if required */ while (read_size < mem->size) { if (mem->size - read_size >= QLC_PEX_DMA_READ_SIZE) size = QLC_PEX_DMA_READ_SIZE; else size = mem->size - read_size; dma_descr->src_addr_low = mem->addr + read_size; dma_descr->read_data_size = size; /* Write DMA descriptor to MS memory*/ temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16; *ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr, (u32 *)dma_descr, temp); if (*ret) { dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n", mem->desc_card_addr); goto free_dma_descr; } *ret = qlcnic_start_pex_dma(adapter, mem); if (*ret) { dev_info(dev, "Failed to start PEX DMA operation\n"); goto free_dma_descr; } memcpy(buffer, dma_buffer, size); buffer += size / 4; read_size += size; } free_dma_descr: kfree(dma_descr); return 
read_size; } static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, __le32 *buffer) { struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; struct device *dev = &adapter->pdev->dev; struct __mem *mem = &entry->region.mem; u32 data_size; int ret = 0; if (fw_dump->use_pex_dma) { data_size = qlcnic_read_memory_pexdma(adapter, mem, buffer, &ret); if (ret) dev_info(dev, "Failed to read memory dump using PEX DMA: mask[0x%x]\n", entry->hdr.mask); else return data_size; } data_size = qlcnic_read_memory_test_agent(adapter, mem, buffer, &ret); if (ret) { dev_info(dev, "Failed to read memory dump using test agent method: mask[0x%x]\n", entry->hdr.mask); return 0; } else { return data_size; } } static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, __le32 *buffer) { entry->hdr.flags |= QLCNIC_DUMP_SKIP; return 0; } static int qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry, u32 size) { int ret = 1; if (size != entry->hdr.cap_size) { dev_err(dev, "Invalid entry, Type:%d\tMask:%d\tSize:%dCap_size:%d\n", entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size); ret = 0; } return ret; } static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, __le32 *buffer) { struct __pollrdmwr *poll = &entry->region.pollrdmwr; u32 data, wait_count, poll_wait, temp; poll_wait = poll->poll_wait; qlcnic_ind_wr(adapter, poll->addr1, poll->val1); wait_count = 0; while (wait_count < poll_wait) { data = qlcnic_ind_rd(adapter, poll->addr1); if ((data & poll->poll_mask) != 0) break; wait_count++; } if (wait_count == poll_wait) { dev_err(&adapter->pdev->dev, "Timeout exceeded in %s, aborting dump\n", __func__); return 0; } data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask; qlcnic_ind_wr(adapter, poll->addr2, data); qlcnic_ind_wr(adapter, poll->addr1, poll->val2); wait_count = 0; while (wait_count < poll_wait) { temp = qlcnic_ind_rd(adapter, poll->addr1); if ((temp & poll->poll_mask) != 0) break; wait_count++; } *buffer++ = cpu_to_le32(poll->addr2); *buffer++ = cpu_to_le32(data); return 2 * sizeof(u32); } static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, __le32 *buffer) { struct __pollrd *pollrd = &entry->region.pollrd; u32 data, wait_count, poll_wait, sel_val; int i; poll_wait = pollrd->poll_wait; sel_val = pollrd->sel_val; for (i = 0; i < pollrd->no_ops; i++) { qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val); wait_count = 0; while (wait_count < poll_wait) { data = qlcnic_ind_rd(adapter, pollrd->sel_addr); if ((data & pollrd->poll_mask) != 0) break; wait_count++; } if (wait_count == poll_wait) { dev_err(&adapter->pdev->dev, "Timeout exceeded in %s, aborting dump\n", __func__); return 0; } data = qlcnic_ind_rd(adapter, pollrd->read_addr); *buffer++ = cpu_to_le32(sel_val); *buffer++ = cpu_to_le32(data); sel_val += pollrd->sel_val_stride; } return pollrd->no_ops * (2 * sizeof(u32)); } static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, __le32 *buffer) { struct __mux2 *mux2 = &entry->region.mux2; u32 data; u32 t_sel_val, sel_val1, sel_val2; int i; sel_val1 = mux2->sel_val1; sel_val2 = mux2->sel_val2; for (i = 0; i < mux2->no_ops; i++) { qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1); t_sel_val = sel_val1 & mux2->sel_val_mask; qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val); data = qlcnic_ind_rd(adapter, mux2->read_addr); *buffer++ = cpu_to_le32(t_sel_val); *buffer++ = 
cpu_to_le32(data); qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2); t_sel_val = sel_val2 & mux2->sel_val_mask; qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val); data = qlcnic_ind_rd(adapter, mux2->read_addr); *buffer++ = cpu_to_le32(t_sel_val); *buffer++ = cpu_to_le32(data); sel_val1 += mux2->sel_val_stride; sel_val2 += mux2->sel_val_stride; } return mux2->no_ops * (4 * sizeof(u32)); } static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, __le32 *buffer) { u32 fl_addr, size; struct __mem *rom = &entry->region.mem; fl_addr = rom->addr; size = rom->size / 4; if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr, (u8 *)buffer, size)) return rom->size; return 0; } static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = { {QLCNIC_DUMP_NOP, qlcnic_dump_nop}, {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb}, {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux}, {QLCNIC_DUMP_QUEUE, qlcnic_dump_que}, {QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom}, {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm}, {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl}, {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache}, {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache}, {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache}, {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache}, {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache}, {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache}, {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache}, {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache}, {QLCNIC_DUMP_READ_ROM, qlcnic_read_rom}, {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory}, {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl}, {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop}, {QLCNIC_DUMP_RDEND, qlcnic_dump_nop}, }; static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = { {QLCNIC_DUMP_NOP, qlcnic_dump_nop}, {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb}, {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux}, {QLCNIC_DUMP_QUEUE, qlcnic_dump_que}, {QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom}, {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm}, {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl}, {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache}, {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache}, {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache}, {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache}, {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache}, {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache}, {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache}, {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache}, {QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd}, {QLCNIC_READ_MUX2, qlcnic_read_mux2}, {QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr}, {QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom}, {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory}, {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl}, {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop}, {QLCNIC_DUMP_RDEND, qlcnic_dump_nop}, }; static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size) { uint64_t sum = 0; int count = temp_size / sizeof(uint32_t); while (count-- > 0) sum += *temp_buffer++; while (sum >> 32) sum = (sum & 0xFFFFFFFF) + (sum >> 32); return ~sum; } static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter, u8 *buffer, u32 size) { int ret = 0; if (qlcnic_82xx_check(adapter)) return -EIO; if (qlcnic_83xx_lock_flash(adapter)) return -EIO; ret = qlcnic_83xx_lockless_flash_read32(adapter, QLC_83XX_MINIDUMP_FLASH, buffer, size / sizeof(u32)); qlcnic_83xx_unlock_flash(adapter); return ret; } static int qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { struct qlcnic_83xx_dump_template_hdr tmp_hdr; u32 size = sizeof(tmp_hdr) / sizeof(u32); int ret = 0; if (qlcnic_82xx_check(adapter)) 
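/* flash-resident minidump template is an 83xx-only feature */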
return -EIO; if (qlcnic_83xx_lock_flash(adapter)) return -EIO; ret = qlcnic_83xx_lockless_flash_read32(adapter, QLC_83XX_MINIDUMP_FLASH, (u8 *)&tmp_hdr, size); qlcnic_83xx_unlock_flash(adapter); cmd->rsp.arg[2] = tmp_hdr.size; cmd->rsp.arg[3] = tmp_hdr.version; return ret; } static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter, u32 *version, u32 *temp_size, u8 *use_flash_temp) { int err = 0; struct qlcnic_cmd_args cmd; if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE)) return -ENOMEM; err = qlcnic_issue_cmd(adapter, &cmd); if (err != QLCNIC_RCODE_SUCCESS) { if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) { qlcnic_free_mbx_args(&cmd); return -EIO; } *use_flash_temp = 1; } *temp_size = cmd.rsp.arg[2]; *version = cmd.rsp.arg[3]; qlcnic_free_mbx_args(&cmd); if (!(*temp_size)) return -EIO; return 0; } static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter, u32 *buffer, u32 temp_size) { int err = 0, i; void *tmp_addr; __le32 *tmp_buf; struct qlcnic_cmd_args cmd; dma_addr_t tmp_addr_t = 0; tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size, &tmp_addr_t, GFP_KERNEL); if (!tmp_addr) return -ENOMEM; if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) { err = -ENOMEM; goto free_mem; } cmd.req.arg[1] = LSD(tmp_addr_t); cmd.req.arg[2] = MSD(tmp_addr_t); cmd.req.arg[3] = temp_size; err = qlcnic_issue_cmd(adapter, &cmd); tmp_buf = tmp_addr; if (err == QLCNIC_RCODE_SUCCESS) { for (i = 0; i < temp_size / sizeof(u32); i++) *buffer++ = __le32_to_cpu(*tmp_buf++); } qlcnic_free_mbx_args(&cmd); free_mem: dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t); return err; } int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw; struct qlcnic_fw_dump *fw_dump; u32 version, csum, *tmp_buf; u8 use_flash_temp = 0; u32 temp_size = 0; void *temp_buffer; int err; ahw = adapter->ahw; fw_dump = &ahw->fw_dump; err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size, &use_flash_temp); if (err) { dev_err(&adapter->pdev->dev, "Can't get template size %d\n", err); return -EIO; } fw_dump->tmpl_hdr = vzalloc(temp_size); if (!fw_dump->tmpl_hdr) return -ENOMEM; tmp_buf = (u32 *)fw_dump->tmpl_hdr; if (use_flash_temp) goto flash_temp; err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size); if (err) { flash_temp: err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf, temp_size); if (err) { dev_err(&adapter->pdev->dev, "Failed to get minidump template header %d\n", err); vfree(fw_dump->tmpl_hdr); fw_dump->tmpl_hdr = NULL; return -EIO; } } csum = qlcnic_temp_checksum((uint32_t *)tmp_buf, temp_size); if (csum) { dev_err(&adapter->pdev->dev, "Template header checksum validation failed\n"); vfree(fw_dump->tmpl_hdr); fw_dump->tmpl_hdr = NULL; return -EIO; } qlcnic_cache_tmpl_hdr_values(adapter, fw_dump); if (fw_dump->use_pex_dma) { fw_dump->dma_buffer = NULL; temp_buffer = dma_alloc_coherent(&adapter->pdev->dev, QLC_PEX_DMA_READ_SIZE, &fw_dump->phys_addr, GFP_KERNEL); if (!temp_buffer) fw_dump->use_pex_dma = false; else fw_dump->dma_buffer = temp_buffer; } dev_info(&adapter->pdev->dev, "Default minidump capture mask 0x%x\n", fw_dump->cap_mask); qlcnic_enable_fw_dump_state(adapter); return 0; } int qlcnic_dump_fw(struct qlcnic_adapter *adapter) { struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; const struct qlcnic_dump_operations *fw_dump_ops; struct qlcnic_83xx_dump_template_hdr *hdr_83xx; u32 entry_offset, dump, no_entries, buf_offset = 
0; int i, k, ops_cnt, ops_index, dump_size = 0; struct device *dev = &adapter->pdev->dev; struct qlcnic_hardware_context *ahw; struct qlcnic_dump_entry *entry; void *tmpl_hdr; u32 ocm_window; __le32 *buffer; char mesg[64]; char *msg[] = {mesg, NULL}; ahw = adapter->ahw; tmpl_hdr = fw_dump->tmpl_hdr; /* Return if we don't have firmware dump template header */ if (!tmpl_hdr) return -EIO; if (!qlcnic_check_fw_dump_state(adapter)) { dev_info(&adapter->pdev->dev, "Dump not enabled\n"); return -EIO; } if (fw_dump->clr) { dev_info(&adapter->pdev->dev, "Previous dump not cleared, not capturing dump\n"); return -EIO; } netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n"); /* Calculate the size for dump data area only */ for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++) if (i & fw_dump->cap_mask) dump_size += qlcnic_get_cap_size(adapter, tmpl_hdr, k); if (!dump_size) return -EIO; fw_dump->data = vzalloc(dump_size); if (!fw_dump->data) return -ENOMEM; buffer = fw_dump->data; fw_dump->size = dump_size; no_entries = fw_dump->num_entries; entry_offset = fw_dump->offset; qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION); qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version); if (qlcnic_82xx_check(adapter)) { ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops); fw_dump_ops = qlcnic_fw_dump_ops; } else { hdr_83xx = tmpl_hdr; ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops); fw_dump_ops = qlcnic_83xx_fw_dump_ops; ocm_window = hdr_83xx->ocm_wnd_reg[ahw->pci_func]; hdr_83xx->saved_state[QLC_83XX_OCM_INDEX] = ocm_window; hdr_83xx->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func; } for (i = 0; i < no_entries; i++) { entry = tmpl_hdr + entry_offset; if (!(entry->hdr.mask & fw_dump->cap_mask)) { entry->hdr.flags |= QLCNIC_DUMP_SKIP; entry_offset += entry->hdr.offset; continue; } /* Find the handler for this entry */ ops_index = 0; while (ops_index < ops_cnt) { if (entry->hdr.type == fw_dump_ops[ops_index].opcode) break; ops_index++; } if (ops_index == ops_cnt) { dev_info(dev, "Skipping unknown entry opcode %d\n", entry->hdr.type); entry->hdr.flags |= QLCNIC_DUMP_SKIP; entry_offset += entry->hdr.offset; continue; } /* Collect dump for this entry */ dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer); if (!qlcnic_valid_dump_entry(dev, entry, dump)) { entry->hdr.flags |= QLCNIC_DUMP_SKIP; entry_offset += entry->hdr.offset; continue; } buf_offset += entry->hdr.cap_size; entry_offset += entry->hdr.offset; buffer = fw_dump->data + buf_offset; cond_resched(); } fw_dump->clr = 1; snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name); netdev_info(adapter->netdev, "Dump data %d bytes captured, dump data address = %p, template header size %d bytes, template address = %p\n", fw_dump->size, fw_dump->data, fw_dump->tmpl_hdr_size, fw_dump->tmpl_hdr); /* Send a udev event to notify availability of FW dump */ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg); return 0; } static inline bool qlcnic_83xx_md_check_extended_dump_capability(struct qlcnic_adapter *adapter) { /* For special adapters (with 0x8830 device ID), where iSCSI firmware * dump needs to be captured as part of regular firmware dump * collection process, firmware exports it's capability through * capability registers */ return ((adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE8830) && (adapter->ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP)); } void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) { u32 prev_version, current_version; struct qlcnic_hardware_context *ahw = 
adapter->ahw; struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump; struct pci_dev *pdev = adapter->pdev; bool extended = false; int ret; prev_version = adapter->fw_version; current_version = qlcnic_83xx_get_fw_version(adapter); if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) { vfree(fw_dump->tmpl_hdr); fw_dump->tmpl_hdr = NULL; if (qlcnic_83xx_md_check_extended_dump_capability(adapter)) extended = !qlcnic_83xx_extend_md_capab(adapter); ret = qlcnic_fw_cmd_get_minidump_temp(adapter); if (ret) return; dev_info(&pdev->dev, "Supports FW dump capability\n"); /* Once we have minidump template with extended iSCSI dump * capability, update the minidump capture mask to 0x1f as * per FW requirement */ if (extended) { struct qlcnic_83xx_dump_template_hdr *hdr; hdr = fw_dump->tmpl_hdr; if (!hdr) return; hdr->drv_cap_mask = 0x1f; fw_dump->cap_mask = 0x1f; dev_info(&pdev->dev, "Extended iSCSI dump capability and updated capture mask to 0x1f\n"); } } }
linux-master
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic qlcnic NIC Driver * Copyright (c) 2009-2013 QLogic Corporation */ #include "qlcnic_sriov.h" #include "qlcnic.h" #include "qlcnic_hw.h" /* Reset template definitions */ #define QLC_83XX_RESTART_TEMPLATE_SIZE 0x2000 #define QLC_83XX_RESET_TEMPLATE_ADDR 0x4F0000 #define QLC_83XX_RESET_SEQ_VERSION 0x0101 #define QLC_83XX_OPCODE_NOP 0x0000 #define QLC_83XX_OPCODE_WRITE_LIST 0x0001 #define QLC_83XX_OPCODE_READ_WRITE_LIST 0x0002 #define QLC_83XX_OPCODE_POLL_LIST 0x0004 #define QLC_83XX_OPCODE_POLL_WRITE_LIST 0x0008 #define QLC_83XX_OPCODE_READ_MODIFY_WRITE 0x0010 #define QLC_83XX_OPCODE_SEQ_PAUSE 0x0020 #define QLC_83XX_OPCODE_SEQ_END 0x0040 #define QLC_83XX_OPCODE_TMPL_END 0x0080 #define QLC_83XX_OPCODE_POLL_READ_LIST 0x0100 /* EPORT control registers */ #define QLC_83XX_RESET_CONTROL 0x28084E50 #define QLC_83XX_RESET_REG 0x28084E60 #define QLC_83XX_RESET_PORT0 0x28084E70 #define QLC_83XX_RESET_PORT1 0x28084E80 #define QLC_83XX_RESET_PORT2 0x28084E90 #define QLC_83XX_RESET_PORT3 0x28084EA0 #define QLC_83XX_RESET_SRESHIM 0x28084EB0 #define QLC_83XX_RESET_EPGSHIM 0x28084EC0 #define QLC_83XX_RESET_ETHERPCS 0x28084ED0 static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter); static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev); static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter); static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev); static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *); static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *); /* Template header */ struct qlc_83xx_reset_hdr { #if defined(__LITTLE_ENDIAN) u16 version; u16 signature; u16 size; u16 entries; u16 hdr_size; u16 checksum; u16 init_offset; u16 start_offset; #elif defined(__BIG_ENDIAN) u16 signature; u16 version; u16 entries; u16 size; u16 checksum; u16 hdr_size; u16 start_offset; u16 init_offset; #endif } __packed; /* Command entry header. 
*/ struct qlc_83xx_entry_hdr { #if defined(__LITTLE_ENDIAN) u16 cmd; u16 size; u16 count; u16 delay; #elif defined(__BIG_ENDIAN) u16 size; u16 cmd; u16 delay; u16 count; #endif } __packed; /* Generic poll command */ struct qlc_83xx_poll { u32 mask; u32 status; } __packed; /* Read modify write command */ struct qlc_83xx_rmw { u32 mask; u32 xor_value; u32 or_value; #if defined(__LITTLE_ENDIAN) u8 shl; u8 shr; u8 index_a; u8 rsvd; #elif defined(__BIG_ENDIAN) u8 rsvd; u8 index_a; u8 shr; u8 shl; #endif } __packed; /* Generic command with 2 DWORD */ struct qlc_83xx_entry { u32 arg1; u32 arg2; } __packed; /* Generic command with 4 DWORD */ struct qlc_83xx_quad_entry { u32 dr_addr; u32 dr_value; u32 ar_addr; u32 ar_value; } __packed; static const char *const qlc_83xx_idc_states[] = { "Unknown", "Cold", "Init", "Ready", "Need Reset", "Need Quiesce", "Failed", "Quiesce" }; static int qlcnic_83xx_idc_check_driver_presence_reg(struct qlcnic_adapter *adapter) { u32 val; val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); if ((val & 0xFFFF)) return 1; else return 0; } static void qlcnic_83xx_idc_log_state_history(struct qlcnic_adapter *adapter) { u32 cur, prev; cur = adapter->ahw->idc.curr_state; prev = adapter->ahw->idc.prev_state; dev_info(&adapter->pdev->dev, "current state = %s, prev state = %s\n", adapter->ahw->idc.name[cur], adapter->ahw->idc.name[prev]); } static int qlcnic_83xx_idc_update_audit_reg(struct qlcnic_adapter *adapter, u8 mode, int lock) { u32 val; int seconds; if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT); val |= (adapter->portnum & 0xf); val |= mode << 7; if (mode) seconds = jiffies / HZ - adapter->ahw->idc.sec_counter; else seconds = jiffies / HZ; val |= seconds << 8; QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT, val); adapter->ahw->idc.sec_counter = jiffies / HZ; if (lock) qlcnic_83xx_unlock_driver(adapter); return 0; } static void qlcnic_83xx_idc_update_minor_version(struct qlcnic_adapter *adapter) { u32 val; val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION); val = val & ~(0x3 << (adapter->portnum * 2)); val = val | (QLC_83XX_IDC_MINOR_VERSION << (adapter->portnum * 2)); QLCWRX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION, val); } static int qlcnic_83xx_idc_update_major_version(struct qlcnic_adapter *adapter, int lock) { u32 val; if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION); val = val & ~0xFF; val = val | QLC_83XX_IDC_MAJOR_VERSION; QLCWRX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION, val); if (lock) qlcnic_83xx_unlock_driver(adapter); return 0; } static int qlcnic_83xx_idc_update_drv_presence_reg(struct qlcnic_adapter *adapter, int status, int lock) { u32 val; if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); if (status) val = val | (1 << adapter->portnum); else val = val & ~(1 << adapter->portnum); QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val); qlcnic_83xx_idc_update_minor_version(adapter); if (lock) qlcnic_83xx_unlock_driver(adapter); return 0; } static int qlcnic_83xx_idc_check_major_version(struct qlcnic_adapter *adapter) { u32 val; u8 version; val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION); version = val & 0xFF; if (version != QLC_83XX_IDC_MAJOR_VERSION) { dev_info(&adapter->pdev->dev, "%s:mismatch. 
version 0x%x, expected version 0x%x\n", __func__, version, QLC_83XX_IDC_MAJOR_VERSION); return -EIO; } return 0; } static int qlcnic_83xx_idc_clear_registers(struct qlcnic_adapter *adapter, int lock) { u32 val; if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, 0); /* Clear graceful reset bit */ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); val &= ~QLC_83XX_IDC_GRACEFULL_RESET; QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); if (lock) qlcnic_83xx_unlock_driver(adapter); return 0; } static int qlcnic_83xx_idc_update_drv_ack_reg(struct qlcnic_adapter *adapter, int flag, int lock) { u32 val; if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK); if (flag) val = val | (1 << adapter->portnum); else val = val & ~(1 << adapter->portnum); QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, val); if (lock) qlcnic_83xx_unlock_driver(adapter); return 0; } static int qlcnic_83xx_idc_check_timeout(struct qlcnic_adapter *adapter, int time_limit) { u64 seconds; seconds = jiffies / HZ - adapter->ahw->idc.sec_counter; if (seconds <= time_limit) return 0; else return -EBUSY; } /** * qlcnic_83xx_idc_check_reset_ack_reg * * @adapter: adapter structure * * Check ACK wait limit and clear the functions which failed to ACK * * Return 0 if all functions have acknowledged the reset request. **/ static int qlcnic_83xx_idc_check_reset_ack_reg(struct qlcnic_adapter *adapter) { int timeout; u32 ack, presence, val; timeout = QLC_83XX_IDC_RESET_TIMEOUT_SECS; ack = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK); presence = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); dev_info(&adapter->pdev->dev, "%s: ack = 0x%x, presence = 0x%x\n", __func__, ack, presence); if (!((ack & presence) == presence)) { if (qlcnic_83xx_idc_check_timeout(adapter, timeout)) { /* Clear functions which failed to ACK */ dev_info(&adapter->pdev->dev, "%s: ACK wait exceeds time limit\n", __func__); val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); val = val & ~(ack ^ presence); if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val); dev_info(&adapter->pdev->dev, "%s: updated drv presence reg = 0x%x\n", __func__, val); qlcnic_83xx_unlock_driver(adapter); return 0; } else { return 1; } } else { dev_info(&adapter->pdev->dev, "%s: Reset ACK received from all functions\n", __func__); return 0; } } /** * qlcnic_83xx_idc_tx_soft_reset * * @adapter: adapter structure * * Handle context deletion and recreation request from transmit routine * * Returns -EBUSY or Success (0) * **/ static int qlcnic_83xx_idc_tx_soft_reset(struct qlcnic_adapter *adapter) { struct net_device *netdev = adapter->netdev; if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) return -EBUSY; netif_device_detach(netdev); qlcnic_down(adapter, netdev); qlcnic_up(adapter, netdev); netif_device_attach(netdev); clear_bit(__QLCNIC_RESETTING, &adapter->state); netdev_info(adapter->netdev, "%s: soft reset complete.\n", __func__); return 0; } /** * qlcnic_83xx_idc_detach_driver * * @adapter: adapter structure * Detach net interface, stop TX and cleanup resources before the HW reset. 
* Returns: None * **/ static void qlcnic_83xx_idc_detach_driver(struct qlcnic_adapter *adapter) { int i; struct net_device *netdev = adapter->netdev; netif_device_detach(netdev); qlcnic_83xx_detach_mailbox_work(adapter); /* Disable mailbox interrupt */ qlcnic_83xx_disable_mbx_intr(adapter); qlcnic_down(adapter, netdev); for (i = 0; i < adapter->ahw->num_msix; i++) { adapter->ahw->intr_tbl[i].id = i; adapter->ahw->intr_tbl[i].enabled = 0; adapter->ahw->intr_tbl[i].src = 0; } if (qlcnic_sriov_pf_check(adapter)) qlcnic_sriov_pf_reset(adapter); } /** * qlcnic_83xx_idc_attach_driver * * @adapter: adapter structure * * Re-attach and re-enable net interface * Returns: None * **/ static void qlcnic_83xx_idc_attach_driver(struct qlcnic_adapter *adapter) { struct net_device *netdev = adapter->netdev; if (netif_running(netdev)) { if (qlcnic_up(adapter, netdev)) goto done; qlcnic_restore_indev_addr(netdev, NETDEV_UP); } done: netif_device_attach(netdev); } static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter, int lock) { if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } qlcnic_83xx_idc_clear_registers(adapter, 0); QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_FAILED); if (lock) qlcnic_83xx_unlock_driver(adapter); qlcnic_83xx_idc_log_state_history(adapter); dev_info(&adapter->pdev->dev, "Device will enter failed state\n"); return 0; } static int qlcnic_83xx_idc_enter_init_state(struct qlcnic_adapter *adapter, int lock) { if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_INIT); if (lock) qlcnic_83xx_unlock_driver(adapter); return 0; } static int qlcnic_83xx_idc_enter_need_quiesce(struct qlcnic_adapter *adapter, int lock) { if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_NEED_QUISCENT); if (lock) qlcnic_83xx_unlock_driver(adapter); return 0; } static int qlcnic_83xx_idc_enter_need_reset_state(struct qlcnic_adapter *adapter, int lock) { if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_NEED_RESET); if (lock) qlcnic_83xx_unlock_driver(adapter); return 0; } static int qlcnic_83xx_idc_enter_ready_state(struct qlcnic_adapter *adapter, int lock) { if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_READY); if (lock) qlcnic_83xx_unlock_driver(adapter); return 0; } /** * qlcnic_83xx_idc_find_reset_owner_id * * @adapter: adapter structure * * NIC gets precedence over ISCSI and ISCSI has precedence over FCOE. 
* Within the same class, function with lowest PCI ID assumes ownership * * Returns: reset owner id or failure indication (-EIO) * **/ static int qlcnic_83xx_idc_find_reset_owner_id(struct qlcnic_adapter *adapter) { u32 reg, reg1, reg2, i, j, owner, class; reg1 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_1); reg2 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_2); owner = QLCNIC_TYPE_NIC; i = 0; j = 0; reg = reg1; do { class = (((reg & (0xF << j * 4)) >> j * 4) & 0x3); if (class == owner) break; if (i == (QLC_83XX_IDC_MAX_FUNC_PER_PARTITION_INFO - 1)) { reg = reg2; j = 0; } else { j++; } if (i == (QLC_83XX_IDC_MAX_CNA_FUNCTIONS - 1)) { if (owner == QLCNIC_TYPE_NIC) owner = QLCNIC_TYPE_ISCSI; else if (owner == QLCNIC_TYPE_ISCSI) owner = QLCNIC_TYPE_FCOE; else if (owner == QLCNIC_TYPE_FCOE) return -EIO; reg = reg1; j = 0; i = 0; } } while (i++ < QLC_83XX_IDC_MAX_CNA_FUNCTIONS); return i; } static int qlcnic_83xx_idc_restart_hw(struct qlcnic_adapter *adapter, int lock) { int ret = 0; ret = qlcnic_83xx_restart_hw(adapter); if (ret) { qlcnic_83xx_idc_enter_failed_state(adapter, lock); } else { qlcnic_83xx_idc_clear_registers(adapter, lock); ret = qlcnic_83xx_idc_enter_ready_state(adapter, lock); } return ret; } static int qlcnic_83xx_idc_check_fan_failure(struct qlcnic_adapter *adapter) { u32 status; status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1); if (status & QLCNIC_RCODE_FATAL_ERROR) { dev_err(&adapter->pdev->dev, "peg halt status1=0x%x\n", status); if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) { dev_err(&adapter->pdev->dev, "On board active cooling fan failed. " "Device has been halted.\n"); dev_err(&adapter->pdev->dev, "Replace the adapter.\n"); return -EIO; } } return 0; } int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) { int err; qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox); qlcnic_83xx_enable_mbx_interrupt(adapter); qlcnic_83xx_initialize_nic(adapter, 1); err = qlcnic_sriov_pf_reinit(adapter); if (err) return err; qlcnic_83xx_enable_mbx_interrupt(adapter); if (qlcnic_83xx_configure_opmode(adapter)) { qlcnic_83xx_idc_enter_failed_state(adapter, 1); return -EIO; } if (adapter->nic_ops->init_driver(adapter)) { qlcnic_83xx_idc_enter_failed_state(adapter, 1); return -EIO; } if (adapter->portnum == 0) qlcnic_set_drv_version(adapter); qlcnic_dcb_get_info(adapter->dcb); qlcnic_83xx_idc_attach_driver(adapter); return 0; } static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1); qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); ahw->idc.quiesce_req = 0; ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; ahw->idc.err_code = 0; ahw->idc.collect_dump = 0; ahw->reset_context = 0; adapter->tx_timeo_cnt = 0; ahw->idc.delay_reset = 0; clear_bit(__QLCNIC_RESETTING, &adapter->state); } /** * qlcnic_83xx_idc_ready_state_entry * * @adapter: adapter structure * * Perform ready state initialization, this routine will get invoked only * once from READY state. 
* * Returns: Error code or Success(0) * **/ int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY) { qlcnic_83xx_idc_update_idc_params(adapter); /* Re-attach the device if required */ if ((ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) || (ahw->idc.prev_state == QLC_83XX_IDC_DEV_INIT)) { if (qlcnic_83xx_idc_reattach_driver(adapter)) return -EIO; } } return 0; } /** * qlcnic_83xx_idc_vnic_pf_entry * * @adapter: adapter structure * * Ensure vNIC mode privileged function starts only after vNIC mode is * enabled by management function. * If vNIC mode is ready, start initialization. * * Returns: -EIO or 0 * **/ int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *adapter) { u32 state; struct qlcnic_hardware_context *ahw = adapter->ahw; /* Privileged function waits till mgmt function enables VNIC mode */ state = QLCRDX(adapter->ahw, QLC_83XX_VNIC_STATE); if (state != QLCNIC_DEV_NPAR_OPER) { if (!ahw->idc.vnic_wait_limit--) { qlcnic_83xx_idc_enter_failed_state(adapter, 1); return -EIO; } dev_info(&adapter->pdev->dev, "vNIC mode disabled\n"); return -EIO; } else { /* Perform one time initialization from ready state */ if (ahw->idc.vnic_state != QLCNIC_DEV_NPAR_OPER) { qlcnic_83xx_idc_update_idc_params(adapter); /* If the previous state is UNKNOWN, device will be already attached properly by Init routine*/ if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_UNKNOWN) { if (qlcnic_83xx_idc_reattach_driver(adapter)) return -EIO; } adapter->ahw->idc.vnic_state = QLCNIC_DEV_NPAR_OPER; dev_info(&adapter->pdev->dev, "vNIC mode enabled\n"); } } return 0; } static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter) { adapter->ahw->idc.err_code = -EIO; dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__); clear_bit(__QLCNIC_RESETTING, &adapter->state); return 0; } /** * qlcnic_83xx_idc_cold_state_handler * * @adapter: adapter structure * * If HW is up and running device will enter READY state. * If firmware image from host needs to be loaded, device is * forced to start with the file firmware image. * * Returns: Error code or Success(0) * **/ static int qlcnic_83xx_idc_cold_state_handler(struct qlcnic_adapter *adapter) { qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 0); qlcnic_83xx_idc_update_audit_reg(adapter, 1, 0); if (qlcnic_load_fw_file) { qlcnic_83xx_idc_restart_hw(adapter, 0); } else { if (qlcnic_83xx_check_hw_status(adapter)) { qlcnic_83xx_idc_enter_failed_state(adapter, 0); return -EIO; } else { qlcnic_83xx_idc_enter_ready_state(adapter, 0); } } return 0; } /** * qlcnic_83xx_idc_init_state * * @adapter: adapter structure * * Reset owner will restart the device from this state. * Device will enter failed state if it remains * in this state for more than DEV_INIT time limit. * * Returns: Error code or Success(0) * **/ static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter) { int timeout, ret = 0; u32 owner; timeout = QLC_83XX_IDC_INIT_TIMEOUT_SECS; if (adapter->ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) { owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); if (adapter->ahw->pci_func == owner) ret = qlcnic_83xx_idc_restart_hw(adapter, 1); } else { ret = qlcnic_83xx_idc_check_timeout(adapter, timeout); } return ret; } /** * qlcnic_83xx_idc_ready_state * * @adapter: adapter structure * * Perform IDC protocol specicifed actions after monitoring device state and * events. 
* * Returns: Error code or Success(0) * **/ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_mailbox *mbx = ahw->mailbox; int ret = 0; u32 val; /* Perform NIC configuration based ready state entry actions */ if (ahw->idc.state_entry(adapter)) return -EIO; if (qlcnic_check_temp(adapter)) { if (ahw->temp == QLCNIC_TEMP_PANIC) { qlcnic_83xx_idc_check_fan_failure(adapter); dev_err(&adapter->pdev->dev, "Error: device temperature %d above limits\n", adapter->ahw->temp); clear_bit(QLC_83XX_MBX_READY, &mbx->status); set_bit(__QLCNIC_RESETTING, &adapter->state); qlcnic_83xx_idc_detach_driver(adapter); qlcnic_83xx_idc_enter_failed_state(adapter, 1); return -EIO; } } val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); ret = qlcnic_83xx_check_heartbeat(adapter); if (ret) { adapter->flags |= QLCNIC_FW_HANG; if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) { clear_bit(QLC_83XX_MBX_READY, &mbx->status); set_bit(__QLCNIC_RESETTING, &adapter->state); qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); } else { netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", __func__); qlcnic_83xx_idc_enter_failed_state(adapter, 1); } return -EIO; } if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) { clear_bit(QLC_83XX_MBX_READY, &mbx->status); /* Move to need reset state and prepare for reset */ qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); return ret; } /* Check for soft reset request */ if (ahw->reset_context && !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) { adapter->ahw->reset_context = 0; qlcnic_83xx_idc_tx_soft_reset(adapter); return ret; } /* Move to need quiesce state if requested */ if (adapter->ahw->idc.quiesce_req) { qlcnic_83xx_idc_enter_need_quiesce(adapter, 1); qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); return ret; } return ret; } /** * qlcnic_83xx_idc_need_reset_state * * @adapter: adapter structure * * Device will remain in this state until: * Reset request ACK's are received from all the functions * Wait time exceeds max time limit * * Returns: Error code or Success(0) * **/ static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter) { struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; int ret = 0; if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) { qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); set_bit(__QLCNIC_RESETTING, &adapter->state); clear_bit(QLC_83XX_MBX_READY, &mbx->status); if (adapter->ahw->nic_mode == QLCNIC_VNIC_MODE) qlcnic_83xx_disable_vnic_mode(adapter, 1); if (qlcnic_check_diag_status(adapter)) { dev_info(&adapter->pdev->dev, "%s: Wait for diag completion\n", __func__); adapter->ahw->idc.delay_reset = 1; return 0; } else { qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1); qlcnic_83xx_idc_detach_driver(adapter); } } if (qlcnic_check_diag_status(adapter)) { dev_info(&adapter->pdev->dev, "%s: Wait for diag completion\n", __func__); return -1; } else { if (adapter->ahw->idc.delay_reset) { qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1); qlcnic_83xx_idc_detach_driver(adapter); adapter->ahw->idc.delay_reset = 0; } /* Check for ACK from other functions */ ret = qlcnic_83xx_idc_check_reset_ack_reg(adapter); if (ret) { dev_info(&adapter->pdev->dev, "%s: Waiting for reset ACK\n", __func__); return -1; } } /* Transit to INIT state and restart the HW */ qlcnic_83xx_idc_enter_init_state(adapter, 1); return ret; } static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter) { dev_err(&adapter->pdev->dev, 
"%s: TBD\n", __func__); return 0; } static void qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; u32 val, owner; val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); if (ahw->pci_func == owner) { qlcnic_83xx_stop_hw(adapter); qlcnic_dump_fw(adapter); } } netdev_warn(adapter->netdev, "%s: Reboot will be required to recover the adapter!!\n", __func__); clear_bit(__QLCNIC_RESETTING, &adapter->state); ahw->idc.err_code = -EIO; return; } static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter) { dev_info(&adapter->pdev->dev, "%s: TBD\n", __func__); return 0; } static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter, u32 state) { u32 cur, prev, next; cur = adapter->ahw->idc.curr_state; prev = adapter->ahw->idc.prev_state; next = state; if ((next < QLC_83XX_IDC_DEV_COLD) || (next > QLC_83XX_IDC_DEV_QUISCENT)) { dev_err(&adapter->pdev->dev, "%s: curr %d, prev %d, next state %d is invalid\n", __func__, cur, prev, state); return 1; } if ((cur == QLC_83XX_IDC_DEV_UNKNOWN) && (prev == QLC_83XX_IDC_DEV_UNKNOWN)) { if ((next != QLC_83XX_IDC_DEV_COLD) && (next != QLC_83XX_IDC_DEV_READY)) { dev_err(&adapter->pdev->dev, "%s: failed, cur %d prev %d next %d\n", __func__, cur, prev, next); return 1; } } if (next == QLC_83XX_IDC_DEV_INIT) { if ((prev != QLC_83XX_IDC_DEV_INIT) && (prev != QLC_83XX_IDC_DEV_COLD) && (prev != QLC_83XX_IDC_DEV_NEED_RESET)) { dev_err(&adapter->pdev->dev, "%s: failed, cur %d prev %d next %d\n", __func__, cur, prev, next); return 1; } } return 0; } #define QLC_83XX_ENCAP_TYPE_VXLAN BIT_1 #define QLC_83XX_MATCH_ENCAP_ID BIT_2 #define QLC_83XX_SET_VXLAN_UDP_DPORT BIT_3 #define QLC_83XX_VXLAN_UDP_DPORT(PORT) ((PORT & 0xffff) << 16) #define QLCNIC_ENABLE_INGRESS_ENCAP_PARSING 1 #define QLCNIC_DISABLE_INGRESS_ENCAP_PARSING 0 int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter, u16 port) { struct qlcnic_cmd_args cmd; int ret = 0; memset(&cmd, 0, sizeof(cmd)); ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INIT_NIC_FUNC); if (ret) return ret; cmd.req.arg[1] = QLC_83XX_MULTI_TENANCY_INFO; cmd.req.arg[2] = QLC_83XX_ENCAP_TYPE_VXLAN | QLC_83XX_SET_VXLAN_UDP_DPORT | QLC_83XX_VXLAN_UDP_DPORT(port); ret = qlcnic_issue_cmd(adapter, &cmd); if (ret) netdev_err(adapter->netdev, "Failed to set VXLAN port %d in adapter\n", port); qlcnic_free_mbx_args(&cmd); return ret; } int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter, u16 port) { struct qlcnic_cmd_args cmd; int ret = 0; memset(&cmd, 0, sizeof(cmd)); ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_INGRESS_ENCAP); if (ret) return ret; cmd.req.arg[1] = port ? QLCNIC_ENABLE_INGRESS_ENCAP_PARSING : QLCNIC_DISABLE_INGRESS_ENCAP_PARSING; ret = qlcnic_issue_cmd(adapter, &cmd); if (ret) netdev_err(adapter->netdev, "Failed to %s VXLAN parsing for port %d\n", port ? "enable" : "disable", port); else netdev_info(adapter->netdev, "%s VXLAN parsing for port %d\n", port ? "Enabled" : "Disabled", port); qlcnic_free_mbx_args(&cmd); return ret; } static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter) { if (adapter->fhash.fnum) qlcnic_prune_lb_filters(adapter); } /** * qlcnic_83xx_idc_poll_dev_state * * @work: kernel work queue structure used to schedule the function * * Poll device state periodically and perform state specific * actions defined by Inter Driver Communication (IDC) protocol. 
* * Returns: None * **/ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work) { struct qlcnic_adapter *adapter; u32 state; adapter = container_of(work, struct qlcnic_adapter, fw_work.work); state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE); if (qlcnic_83xx_idc_check_state_validity(adapter, state)) { qlcnic_83xx_idc_log_state_history(adapter); adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN; } else { adapter->ahw->idc.curr_state = state; } switch (adapter->ahw->idc.curr_state) { case QLC_83XX_IDC_DEV_READY: qlcnic_83xx_idc_ready_state(adapter); break; case QLC_83XX_IDC_DEV_NEED_RESET: qlcnic_83xx_idc_need_reset_state(adapter); break; case QLC_83XX_IDC_DEV_NEED_QUISCENT: qlcnic_83xx_idc_need_quiesce_state(adapter); break; case QLC_83XX_IDC_DEV_FAILED: qlcnic_83xx_idc_failed_state(adapter); return; case QLC_83XX_IDC_DEV_INIT: qlcnic_83xx_idc_init_state(adapter); break; case QLC_83XX_IDC_DEV_QUISCENT: qlcnic_83xx_idc_quiesce_state(adapter); break; default: qlcnic_83xx_idc_unknown_state(adapter); return; } adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state; qlcnic_83xx_periodic_tasks(adapter); /* Re-schedule the function */ if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status)) qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, adapter->ahw->idc.delay); } static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter) { u32 idc_params, val; if (qlcnic_83xx_flash_read32(adapter, QLC_83XX_IDC_FLASH_PARAM_ADDR, (u8 *)&idc_params, 1)) { dev_info(&adapter->pdev->dev, "%s:failed to get IDC params from flash\n", __func__); adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS; adapter->reset_ack_timeo = QLC_83XX_IDC_RESET_TIMEOUT_SECS; } else { adapter->dev_init_timeo = idc_params & 0xFFFF; adapter->reset_ack_timeo = ((idc_params >> 16) & 0xFFFF); } adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN; adapter->ahw->idc.prev_state = QLC_83XX_IDC_DEV_UNKNOWN; adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; adapter->ahw->idc.err_code = 0; adapter->ahw->idc.collect_dump = 0; adapter->ahw->idc.name = (char **)qlc_83xx_idc_states; clear_bit(__QLCNIC_RESETTING, &adapter->state); set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); /* Check if reset recovery is disabled */ if (!qlcnic_auto_fw_reset) { /* Propagate do not reset request to other functions */ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); val = val | QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY; QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); } } static int qlcnic_83xx_idc_first_to_load_function_handler(struct qlcnic_adapter *adapter) { u32 state, val; if (qlcnic_83xx_lock_driver(adapter)) return -EIO; /* Clear driver lock register */ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, 0); if (qlcnic_83xx_idc_update_major_version(adapter, 0)) { qlcnic_83xx_unlock_driver(adapter); return -EIO; } state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE); if (qlcnic_83xx_idc_check_state_validity(adapter, state)) { qlcnic_83xx_unlock_driver(adapter); return -EIO; } if (state != QLC_83XX_IDC_DEV_COLD && qlcnic_load_fw_file) { QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_COLD); state = QLC_83XX_IDC_DEV_COLD; } adapter->ahw->idc.curr_state = state; /* First to load function should cold boot the device */ if (state == QLC_83XX_IDC_DEV_COLD) qlcnic_83xx_idc_cold_state_handler(adapter); /* Check if reset recovery is enabled */ if (qlcnic_auto_fw_reset) { val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); val = val & ~QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY; QLCWRX(adapter->ahw, 
QLC_83XX_IDC_CTRL, val); } qlcnic_83xx_unlock_driver(adapter); return 0; } int qlcnic_83xx_idc_init(struct qlcnic_adapter *adapter) { int ret = -EIO; qlcnic_83xx_setup_idc_parameters(adapter); if (qlcnic_83xx_get_reset_instruction_template(adapter)) return ret; if (!qlcnic_83xx_idc_check_driver_presence_reg(adapter)) { if (qlcnic_83xx_idc_first_to_load_function_handler(adapter)) return -EIO; } else { if (qlcnic_83xx_idc_check_major_version(adapter)) return -EIO; } qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); return 0; } void qlcnic_83xx_idc_exit(struct qlcnic_adapter *adapter) { int id; u32 val; while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) usleep_range(10000, 11000); id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID); id = id & 0xFF; if (id == adapter->portnum) { dev_err(&adapter->pdev->dev, "%s: wait for lock recovery.. %d\n", __func__, id); msleep(20); id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID); id = id & 0xFF; } /* Clear driver presence bit */ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); val = val & ~(1 << adapter->portnum); QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val); clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); clear_bit(__QLCNIC_RESETTING, &adapter->state); cancel_delayed_work_sync(&adapter->fw_work); } void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key) { u32 val; if (qlcnic_sriov_vf_check(adapter)) return; if (qlcnic_83xx_lock_driver(adapter)) { dev_err(&adapter->pdev->dev, "%s:failed, please retry\n", __func__); return; } val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", __func__); qlcnic_83xx_idc_enter_failed_state(adapter, 0); qlcnic_83xx_unlock_driver(adapter); return; } if (key == QLCNIC_FORCE_FW_RESET) { val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); val = val | QLC_83XX_IDC_GRACEFULL_RESET; QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); } else if (key == QLCNIC_FORCE_FW_DUMP_KEY) { adapter->ahw->idc.collect_dump = 1; } qlcnic_83xx_unlock_driver(adapter); return; } static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter) { u8 *p_cache; u32 src, size; u64 dest; int ret = -EIO; src = QLC_83XX_BOOTLOADER_FLASH_ADDR; dest = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_ADDR); size = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_SIZE); /* alignment check */ if (size & 0xF) size = (size + 16) & ~0xF; p_cache = vzalloc(size); if (p_cache == NULL) return -ENOMEM; ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache, size / sizeof(u32)); if (ret) { vfree(p_cache); return ret; } /* 16 byte write to MS memory */ ret = qlcnic_ms_mem_write128(adapter, dest, (u32 *)p_cache, size / 16); if (ret) { vfree(p_cache); return ret; } vfree(p_cache); return ret; } static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) { struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; const struct firmware *fw = fw_info->fw; u32 dest, *p_cache, *temp; __le32 *temp_le; u8 data[16]; size_t size; int i, ret; u64 addr; temp = vzalloc(fw->size); if (!temp) { release_firmware(fw); fw_info->fw = NULL; return -ENOMEM; } temp_le = (__le32 *)fw->data; /* FW image in file is in little endian, swap the data to nullify * the effect of writel() operation on big endian platform. 
*/ for (i = 0; i < fw->size / sizeof(u32); i++) temp[i] = __le32_to_cpu(temp_le[i]); dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR); size = (fw->size & ~0xF); p_cache = temp; addr = (u64)dest; ret = qlcnic_ms_mem_write128(adapter, addr, p_cache, size / 16); if (ret) { dev_err(&adapter->pdev->dev, "MS memory write failed\n"); goto exit; } /* alignment check */ if (fw->size & 0xF) { addr = dest + size; for (i = 0; i < (fw->size & 0xF); i++) data[i] = ((u8 *)temp)[size + i]; for (; i < 16; i++) data[i] = 0; ret = qlcnic_ms_mem_write128(adapter, addr, (u32 *)data, 1); if (ret) { dev_err(&adapter->pdev->dev, "MS memory write failed\n"); goto exit; } } exit: release_firmware(fw); fw_info->fw = NULL; vfree(temp); return ret; } static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter) { int i, j; u32 val = 0, val1 = 0, reg = 0; int err = 0; val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG, &err); if (err == -EIO) return; dev_info(&adapter->pdev->dev, "SRE-Shim Ctrl:0x%x\n", val); for (j = 0; j < 2; j++) { if (j == 0) { dev_info(&adapter->pdev->dev, "Port 0 RxB Pause Threshold Regs[TC7..TC0]:"); reg = QLC_83XX_PORT0_THRESHOLD; } else if (j == 1) { dev_info(&adapter->pdev->dev, "Port 1 RxB Pause Threshold Regs[TC7..TC0]:"); reg = QLC_83XX_PORT1_THRESHOLD; } for (i = 0; i < 8; i++) { val = QLCRD32(adapter, reg + (i * 0x4), &err); if (err == -EIO) return; dev_info(&adapter->pdev->dev, "0x%x ", val); } dev_info(&adapter->pdev->dev, "\n"); } for (j = 0; j < 2; j++) { if (j == 0) { dev_info(&adapter->pdev->dev, "Port 0 RxB TC Max Cell Registers[4..1]:"); reg = QLC_83XX_PORT0_TC_MC_REG; } else if (j == 1) { dev_info(&adapter->pdev->dev, "Port 1 RxB TC Max Cell Registers[4..1]:"); reg = QLC_83XX_PORT1_TC_MC_REG; } for (i = 0; i < 4; i++) { val = QLCRD32(adapter, reg + (i * 0x4), &err); if (err == -EIO) return; dev_info(&adapter->pdev->dev, "0x%x ", val); } dev_info(&adapter->pdev->dev, "\n"); } for (j = 0; j < 2; j++) { if (j == 0) { dev_info(&adapter->pdev->dev, "Port 0 RxB Rx TC Stats[TC7..TC0]:"); reg = QLC_83XX_PORT0_TC_STATS; } else if (j == 1) { dev_info(&adapter->pdev->dev, "Port 1 RxB Rx TC Stats[TC7..TC0]:"); reg = QLC_83XX_PORT1_TC_STATS; } for (i = 7; i >= 0; i--) { val = QLCRD32(adapter, reg, &err); if (err == -EIO) return; val &= ~(0x7 << 29); /* Reset bits 29 to 31 */ QLCWR32(adapter, reg, (val | (i << 29))); val = QLCRD32(adapter, reg, &err); if (err == -EIO) return; dev_info(&adapter->pdev->dev, "0x%x ", val); } dev_info(&adapter->pdev->dev, "\n"); } val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, &err); if (err == -EIO) return; val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, &err); if (err == -EIO) return; dev_info(&adapter->pdev->dev, "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n", val, val1); } static void qlcnic_83xx_disable_pause_frames(struct qlcnic_adapter *adapter) { u32 reg = 0, i, j; if (qlcnic_83xx_lock_driver(adapter)) { dev_err(&adapter->pdev->dev, "%s:failed to acquire driver lock\n", __func__); return; } qlcnic_83xx_dump_pause_control_regs(adapter); QLCWR32(adapter, QLC_83XX_SRE_SHIM_REG, 0x0); for (j = 0; j < 2; j++) { if (j == 0) reg = QLC_83XX_PORT0_THRESHOLD; else if (j == 1) reg = QLC_83XX_PORT1_THRESHOLD; for (i = 0; i < 8; i++) QLCWR32(adapter, reg + (i * 0x4), 0x0); } for (j = 0; j < 2; j++) { if (j == 0) reg = QLC_83XX_PORT0_TC_MC_REG; else if (j == 1) reg = QLC_83XX_PORT1_TC_MC_REG; for (i = 0; i < 4; i++) QLCWR32(adapter, reg + (i * 0x4), 0x03FF03FF); } QLCWR32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, 0); QLCWR32(adapter, 
QLC_83XX_PORT3_IFB_THRESHOLD, 0); dev_info(&adapter->pdev->dev, "Disabled pause frames successfully on all ports\n"); qlcnic_83xx_unlock_driver(adapter); } static void qlcnic_83xx_take_eport_out_of_reset(struct qlcnic_adapter *adapter) { QLCWR32(adapter, QLC_83XX_RESET_REG, 0); QLCWR32(adapter, QLC_83XX_RESET_PORT0, 0); QLCWR32(adapter, QLC_83XX_RESET_PORT1, 0); QLCWR32(adapter, QLC_83XX_RESET_PORT2, 0); QLCWR32(adapter, QLC_83XX_RESET_PORT3, 0); QLCWR32(adapter, QLC_83XX_RESET_SRESHIM, 0); QLCWR32(adapter, QLC_83XX_RESET_EPGSHIM, 0); QLCWR32(adapter, QLC_83XX_RESET_ETHERPCS, 0); QLCWR32(adapter, QLC_83XX_RESET_CONTROL, 1); } static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev) { u32 heartbeat, peg_status; int retries, ret = -EIO, err = 0; retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT; p_dev->heartbeat = QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_ALIVE_COUNTER); do { msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS); heartbeat = QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_ALIVE_COUNTER); if (heartbeat != p_dev->heartbeat) { ret = QLCNIC_RCODE_SUCCESS; break; } } while (--retries); if (ret) { dev_err(&p_dev->pdev->dev, "firmware hang detected\n"); qlcnic_83xx_take_eport_out_of_reset(p_dev); qlcnic_83xx_disable_pause_frames(p_dev); peg_status = QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS1); dev_info(&p_dev->pdev->dev, "Dumping HW/FW registers\n" "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n" "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n" "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" "PEG_NET_4_PC: 0x%x\n", peg_status, QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS2), QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0, &err), QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1, &err), QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2, &err), QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3, &err), QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4, &err)); if (QLCNIC_FWERROR_CODE(peg_status) == 0x67) dev_err(&p_dev->pdev->dev, "Device is being reset err code 0x00006700.\n"); } return ret; } static int qlcnic_83xx_check_cmd_peg_status(struct qlcnic_adapter *p_dev) { int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT; u32 val; do { val = QLC_SHARED_REG_RD32(p_dev, QLCNIC_CMDPEG_STATE); if (val == QLC_83XX_CMDPEG_COMPLETE) return 0; msleep(QLCNIC_CMDPEG_CHECK_DELAY); } while (--retries); dev_err(&p_dev->pdev->dev, "%s: failed, state = 0x%x\n", __func__, val); return -EIO; } static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev) { int err; err = qlcnic_83xx_check_cmd_peg_status(p_dev); if (err) return err; err = qlcnic_83xx_check_heartbeat(p_dev); if (err) return err; return err; } static int qlcnic_83xx_poll_reg(struct qlcnic_adapter *p_dev, u32 addr, int duration, u32 mask, u32 status) { int timeout_error, err = 0; u32 value; u8 retries; value = QLCRD32(p_dev, addr, &err); if (err == -EIO) return err; retries = duration / 10; do { if ((value & mask) != status) { timeout_error = 1; msleep(duration / 10); value = QLCRD32(p_dev, addr, &err); if (err == -EIO) return err; } else { timeout_error = 0; break; } } while (retries--); if (timeout_error) { p_dev->ahw->reset.seq_error++; dev_err(&p_dev->pdev->dev, "%s: Timeout Err, entry_num = %d\n", __func__, p_dev->ahw->reset.seq_index); dev_err(&p_dev->pdev->dev, "0x%08x 0x%08x 0x%08x\n", value, mask, status); } return timeout_error; } static int qlcnic_83xx_reset_template_checksum(struct qlcnic_adapter *p_dev) { u32 sum = 0; u16 *buff = (u16 *)p_dev->ahw->reset.buff; int count = p_dev->ahw->reset.hdr->size / sizeof(u16); while (count-- > 0) sum += *buff++; while (sum >> 16) sum = (sum & 0xFFFF) + (sum >> 
16); if (~sum) { return 0; } else { dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__); return -1; } } static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev) { struct qlcnic_hardware_context *ahw = p_dev->ahw; u32 addr, count, prev_ver, curr_ver; u8 *p_buff; if (ahw->reset.buff != NULL) { prev_ver = p_dev->fw_version; curr_ver = qlcnic_83xx_get_fw_version(p_dev); if (curr_ver > prev_ver) kfree(ahw->reset.buff); else return 0; } ahw->reset.seq_error = 0; ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL); if (ahw->reset.buff == NULL) return -ENOMEM; p_buff = p_dev->ahw->reset.buff; addr = QLC_83XX_RESET_TEMPLATE_ADDR; count = sizeof(struct qlc_83xx_reset_hdr) / sizeof(u32); /* Copy template header from flash */ if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) { dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__); return -EIO; } ahw->reset.hdr = (struct qlc_83xx_reset_hdr *)ahw->reset.buff; addr = QLC_83XX_RESET_TEMPLATE_ADDR + ahw->reset.hdr->hdr_size; p_buff = ahw->reset.buff + ahw->reset.hdr->hdr_size; count = (ahw->reset.hdr->size - ahw->reset.hdr->hdr_size) / sizeof(u32); /* Copy rest of the template */ if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) { dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__); return -EIO; } if (qlcnic_83xx_reset_template_checksum(p_dev)) return -EIO; /* Get Stop, Start and Init command offsets */ ahw->reset.init_offset = ahw->reset.buff + ahw->reset.hdr->init_offset; ahw->reset.start_offset = ahw->reset.buff + ahw->reset.hdr->start_offset; ahw->reset.stop_offset = ahw->reset.buff + ahw->reset.hdr->hdr_size; return 0; } /* Read Write HW register command */ static void qlcnic_83xx_read_write_crb_reg(struct qlcnic_adapter *p_dev, u32 raddr, u32 waddr) { int err = 0; u32 value; value = QLCRD32(p_dev, raddr, &err); if (err == -EIO) return; qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value); } /* Read Modify Write HW register command */ static void qlcnic_83xx_rmw_crb_reg(struct qlcnic_adapter *p_dev, u32 raddr, u32 waddr, struct qlc_83xx_rmw *p_rmw_hdr) { int err = 0; u32 value; if (p_rmw_hdr->index_a) { value = p_dev->ahw->reset.array[p_rmw_hdr->index_a]; } else { value = QLCRD32(p_dev, raddr, &err); if (err == -EIO) return; } value &= p_rmw_hdr->mask; value <<= p_rmw_hdr->shl; value >>= p_rmw_hdr->shr; value |= p_rmw_hdr->or_value; value ^= p_rmw_hdr->xor_value; qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value); } /* Write HW register command */ static void qlcnic_83xx_write_list(struct qlcnic_adapter *p_dev, struct qlc_83xx_entry_hdr *p_hdr) { int i; struct qlc_83xx_entry *entry; entry = (struct qlc_83xx_entry *)((char *)p_hdr + sizeof(struct qlc_83xx_entry_hdr)); for (i = 0; i < p_hdr->count; i++, entry++) { qlcnic_83xx_wrt_reg_indirect(p_dev, entry->arg1, entry->arg2); if (p_hdr->delay) udelay((u32)(p_hdr->delay)); } } /* Read and Write instruction */ static void qlcnic_83xx_read_write_list(struct qlcnic_adapter *p_dev, struct qlc_83xx_entry_hdr *p_hdr) { int i; struct qlc_83xx_entry *entry; entry = (struct qlc_83xx_entry *)((char *)p_hdr + sizeof(struct qlc_83xx_entry_hdr)); for (i = 0; i < p_hdr->count; i++, entry++) { qlcnic_83xx_read_write_crb_reg(p_dev, entry->arg1, entry->arg2); if (p_hdr->delay) udelay((u32)(p_hdr->delay)); } } /* Poll HW register command */ static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev, struct qlc_83xx_entry_hdr *p_hdr) { long delay; struct qlc_83xx_entry *entry; struct qlc_83xx_poll *poll; int i, err = 0; unsigned long arg1, 
	arg2;

	poll = (struct qlc_83xx_poll *)((char *)p_hdr +
					sizeof(struct qlc_83xx_entry_hdr));

	entry = (struct qlc_83xx_entry *)((char *)poll +
					  sizeof(struct qlc_83xx_poll));
	delay = (long)p_hdr->delay;

	if (!delay) {
		for (i = 0; i < p_hdr->count; i++, entry++)
			qlcnic_83xx_poll_reg(p_dev, entry->arg1,
					     delay, poll->mask, poll->status);
	} else {
		for (i = 0; i < p_hdr->count; i++, entry++) {
			arg1 = entry->arg1;
			arg2 = entry->arg2;
			if (delay) {
				if (qlcnic_83xx_poll_reg(p_dev, arg1, delay,
							 poll->mask,
							 poll->status)) {
					QLCRD32(p_dev, arg1, &err);
					if (err == -EIO)
						return;
					QLCRD32(p_dev, arg2, &err);
					if (err == -EIO)
						return;
				}
			}
		}
	}
}

/* Poll and write HW register command */
static void qlcnic_83xx_poll_write_list(struct qlcnic_adapter *p_dev,
					struct qlc_83xx_entry_hdr *p_hdr)
{
	int i;
	long delay;
	struct qlc_83xx_quad_entry *entry;
	struct qlc_83xx_poll *poll;

	poll = (struct qlc_83xx_poll *)((char *)p_hdr +
					sizeof(struct qlc_83xx_entry_hdr));
	entry = (struct qlc_83xx_quad_entry *)((char *)poll +
					       sizeof(struct qlc_83xx_poll));
	delay = (long)p_hdr->delay;

	for (i = 0; i < p_hdr->count; i++, entry++) {
		qlcnic_83xx_wrt_reg_indirect(p_dev, entry->dr_addr,
					     entry->dr_value);
		qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr,
					     entry->ar_value);
		if (delay)
			qlcnic_83xx_poll_reg(p_dev, entry->ar_addr,
					     delay, poll->mask, poll->status);
	}
}

/* Read Modify Write register command */
static void qlcnic_83xx_read_modify_write(struct qlcnic_adapter *p_dev,
					  struct qlc_83xx_entry_hdr *p_hdr)
{
	int i;
	struct qlc_83xx_entry *entry;
	struct qlc_83xx_rmw *rmw_hdr;

	rmw_hdr = (struct qlc_83xx_rmw *)((char *)p_hdr +
					  sizeof(struct qlc_83xx_entry_hdr));

	entry = (struct qlc_83xx_entry *)((char *)rmw_hdr +
					  sizeof(struct qlc_83xx_rmw));

	for (i = 0; i < p_hdr->count; i++, entry++) {
		qlcnic_83xx_rmw_crb_reg(p_dev, entry->arg1,
					entry->arg2, rmw_hdr);
		if (p_hdr->delay)
			udelay((u32)(p_hdr->delay));
	}
}

static void qlcnic_83xx_pause(struct qlc_83xx_entry_hdr *p_hdr)
{
	if (p_hdr->delay)
		mdelay((u32)((long)p_hdr->delay));
}

/* Read and poll register command */
static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev,
				       struct qlc_83xx_entry_hdr *p_hdr)
{
	long delay;
	int index, i, j, err;
	struct qlc_83xx_quad_entry *entry;
	struct qlc_83xx_poll *poll;
	unsigned long addr;

	poll = (struct qlc_83xx_poll *)((char *)p_hdr +
					sizeof(struct qlc_83xx_entry_hdr));

	entry = (struct qlc_83xx_quad_entry *)((char *)poll +
					       sizeof(struct qlc_83xx_poll));
	delay = (long)p_hdr->delay;

	for (i = 0; i < p_hdr->count; i++, entry++) {
		qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr,
					     entry->ar_value);
		if (delay) {
			if (!qlcnic_83xx_poll_reg(p_dev, entry->ar_addr,
						  delay, poll->mask,
						  poll->status)) {
				index = p_dev->ahw->reset.array_index;
				addr = entry->dr_addr;
				j = QLCRD32(p_dev, addr, &err);
				if (err == -EIO)
					return;

				p_dev->ahw->reset.array[index++] = j;

				if (index == QLC_83XX_MAX_RESET_SEQ_ENTRIES)
					p_dev->ahw->reset.array_index = 1;
			}
		}
	}
}

static inline void qlcnic_83xx_seq_end(struct qlcnic_adapter *p_dev)
{
	p_dev->ahw->reset.seq_end = 1;
}

static void qlcnic_83xx_template_end(struct qlcnic_adapter *p_dev)
{
	p_dev->ahw->reset.template_end = 1;
	if (p_dev->ahw->reset.seq_error == 0)
		dev_err(&p_dev->pdev->dev,
			"HW restart process completed successfully.\n");
	else
		dev_err(&p_dev->pdev->dev,
			"HW restart completed with timeout errors.\n");
}

/**
 * qlcnic_83xx_exec_template_cmd
 *
 * @p_dev: adapter structure
 * @p_buff: Pointer to instruction template
 *
 * Template provides instructions to stop, restart and initialize firmware.
 * These instructions are abstracted as a series of read, write and
 * poll operations on hardware registers. Register information and operation
 * specifics are not exposed to the driver. Driver reads the template from
 * flash and executes the instructions located at pre-defined offsets.
 *
 * Returns: None
 * */
static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
					  char *p_buff)
{
	int index, entries;
	struct qlc_83xx_entry_hdr *p_hdr;
	char *entry = p_buff;

	p_dev->ahw->reset.seq_end = 0;
	p_dev->ahw->reset.template_end = 0;
	entries = p_dev->ahw->reset.hdr->entries;
	index = p_dev->ahw->reset.seq_index;

	for (; (!p_dev->ahw->reset.seq_end) && (index < entries); index++) {
		p_hdr = (struct qlc_83xx_entry_hdr *)entry;

		switch (p_hdr->cmd) {
		case QLC_83XX_OPCODE_NOP:
			break;
		case QLC_83XX_OPCODE_WRITE_LIST:
			qlcnic_83xx_write_list(p_dev, p_hdr);
			break;
		case QLC_83XX_OPCODE_READ_WRITE_LIST:
			qlcnic_83xx_read_write_list(p_dev, p_hdr);
			break;
		case QLC_83XX_OPCODE_POLL_LIST:
			qlcnic_83xx_poll_list(p_dev, p_hdr);
			break;
		case QLC_83XX_OPCODE_POLL_WRITE_LIST:
			qlcnic_83xx_poll_write_list(p_dev, p_hdr);
			break;
		case QLC_83XX_OPCODE_READ_MODIFY_WRITE:
			qlcnic_83xx_read_modify_write(p_dev, p_hdr);
			break;
		case QLC_83XX_OPCODE_SEQ_PAUSE:
			qlcnic_83xx_pause(p_hdr);
			break;
		case QLC_83XX_OPCODE_SEQ_END:
			qlcnic_83xx_seq_end(p_dev);
			break;
		case QLC_83XX_OPCODE_TMPL_END:
			qlcnic_83xx_template_end(p_dev);
			break;
		case QLC_83XX_OPCODE_POLL_READ_LIST:
			qlcnic_83xx_poll_read_list(p_dev, p_hdr);
			break;
		default:
			dev_err(&p_dev->pdev->dev,
				"%s: Unknown opcode 0x%04x in template %d\n",
				__func__, p_hdr->cmd, index);
			break;
		}
		entry += p_hdr->size;
		cond_resched();
	}
	p_dev->ahw->reset.seq_index = index;
}

static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
{
	p_dev->ahw->reset.seq_index = 0;
	qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.stop_offset);
	if (p_dev->ahw->reset.seq_end != 1)
		dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
}

static void qlcnic_83xx_start_hw(struct qlcnic_adapter *p_dev)
{
	qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.start_offset);
	if (p_dev->ahw->reset.template_end != 1)
		dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
}

static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev)
{
	qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.init_offset);
	if (p_dev->ahw->reset.seq_end != 1)
		dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
}

/* POST FW related definitions */
#define QLC_83XX_POST_SIGNATURE_REG	0x41602014
#define QLC_83XX_POST_MODE_REG		0x41602018
#define QLC_83XX_POST_FAST_MODE		0
#define QLC_83XX_POST_MEDIUM_MODE	1
#define QLC_83XX_POST_SLOW_MODE		2

/* POST Timeout values in milliseconds */
#define QLC_83XX_POST_FAST_MODE_TIMEOUT	690
#define QLC_83XX_POST_MED_MODE_TIMEOUT	2930
#define QLC_83XX_POST_SLOW_MODE_TIMEOUT	7500

/* POST result values */
#define QLC_83XX_POST_PASS			0xfffffff0
#define QLC_83XX_POST_ASIC_STRESS_TEST_FAIL	0xffffffff
#define QLC_83XX_POST_DDR_TEST_FAIL		0xfffffffe
#define QLC_83XX_POST_ASIC_MEMORY_TEST_FAIL	0xfffffffc
#define QLC_83XX_POST_FLASH_TEST_FAIL		0xfffffff8

static int qlcnic_83xx_run_post(struct qlcnic_adapter *adapter)
{
	struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
	struct device *dev = &adapter->pdev->dev;
	int timeout, count, ret = 0;
	u32 signature;

	/* Set timeout values with extra 2 seconds of buffer */
	switch (adapter->ahw->post_mode) {
	case QLC_83XX_POST_FAST_MODE:
		timeout = QLC_83XX_POST_FAST_MODE_TIMEOUT + 2000;
		break;
	case QLC_83XX_POST_MEDIUM_MODE:
		timeout = QLC_83XX_POST_MED_MODE_TIMEOUT + 2000;
break; case QLC_83XX_POST_SLOW_MODE: timeout = QLC_83XX_POST_SLOW_MODE_TIMEOUT + 2000; break; default: return -EINVAL; } strncpy(fw_info->fw_file_name, QLC_83XX_POST_FW_FILE_NAME, QLC_FW_FILE_NAME_LEN); ret = request_firmware(&fw_info->fw, fw_info->fw_file_name, dev); if (ret) { dev_err(dev, "POST firmware can not be loaded, skipping POST\n"); return 0; } ret = qlcnic_83xx_copy_fw_file(adapter); if (ret) return ret; /* clear QLC_83XX_POST_SIGNATURE_REG register */ qlcnic_ind_wr(adapter, QLC_83XX_POST_SIGNATURE_REG, 0); /* Set POST mode */ qlcnic_ind_wr(adapter, QLC_83XX_POST_MODE_REG, adapter->ahw->post_mode); QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, QLC_83XX_BOOT_FROM_FILE); qlcnic_83xx_start_hw(adapter); count = 0; do { msleep(100); count += 100; signature = qlcnic_ind_rd(adapter, QLC_83XX_POST_SIGNATURE_REG); if (signature == QLC_83XX_POST_PASS) break; } while (timeout > count); if (timeout <= count) { dev_err(dev, "POST timed out, signature = 0x%08x\n", signature); return -EIO; } switch (signature) { case QLC_83XX_POST_PASS: dev_info(dev, "POST passed, Signature = 0x%08x\n", signature); break; case QLC_83XX_POST_ASIC_STRESS_TEST_FAIL: dev_err(dev, "POST failed, Test case : ASIC STRESS TEST, Signature = 0x%08x\n", signature); ret = -EIO; break; case QLC_83XX_POST_DDR_TEST_FAIL: dev_err(dev, "POST failed, Test case : DDT TEST, Signature = 0x%08x\n", signature); ret = -EIO; break; case QLC_83XX_POST_ASIC_MEMORY_TEST_FAIL: dev_err(dev, "POST failed, Test case : ASIC MEMORY TEST, Signature = 0x%08x\n", signature); ret = -EIO; break; case QLC_83XX_POST_FLASH_TEST_FAIL: dev_err(dev, "POST failed, Test case : FLASH TEST, Signature = 0x%08x\n", signature); ret = -EIO; break; default: dev_err(dev, "POST failed, Test case : INVALID, Signature = 0x%08x\n", signature); ret = -EIO; break; } return ret; } static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter) { struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; int err = -EIO; if (request_firmware(&fw_info->fw, fw_info->fw_file_name, &(adapter->pdev->dev))) { dev_err(&adapter->pdev->dev, "No file FW image, loading flash FW image.\n"); QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, QLC_83XX_BOOT_FROM_FLASH); } else { if (qlcnic_83xx_copy_fw_file(adapter)) return err; QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, QLC_83XX_BOOT_FROM_FILE); } return 0; } static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter) { u32 val; int err = -EIO; qlcnic_83xx_stop_hw(adapter); /* Collect FW register dump if required */ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); if (!(val & QLC_83XX_IDC_GRACEFULL_RESET)) qlcnic_dump_fw(adapter); if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", __func__); qlcnic_83xx_idc_enter_failed_state(adapter, 1); return err; } qlcnic_83xx_init_hw(adapter); if (qlcnic_83xx_copy_bootloader(adapter)) return err; /* Check if POST needs to be run */ if (adapter->ahw->run_post) { err = qlcnic_83xx_run_post(adapter); if (err) return err; /* No need to run POST in next reset sequence */ adapter->ahw->run_post = false; /* Again reset the adapter to load regular firmware */ qlcnic_83xx_stop_hw(adapter); qlcnic_83xx_init_hw(adapter); err = qlcnic_83xx_copy_bootloader(adapter); if (err) return err; } /* Boot either flash image or firmware image from host file system */ if (qlcnic_load_fw_file == 1) { err = qlcnic_83xx_load_fw_image_from_host(adapter); if (err) return err; } else { QLC_SHARED_REG_WR32(adapter, 
QLCNIC_FW_IMG_VALID, QLC_83XX_BOOT_FROM_FLASH); } qlcnic_83xx_start_hw(adapter); if (qlcnic_83xx_check_hw_status(adapter)) return -EIO; return 0; } static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter) { int err; struct qlcnic_info nic_info; struct qlcnic_hardware_context *ahw = adapter->ahw; memset(&nic_info, 0, sizeof(struct qlcnic_info)); err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func); if (err) return -EIO; ahw->physical_port = (u8) nic_info.phys_port; ahw->switch_mode = nic_info.switch_mode; ahw->max_tx_ques = nic_info.max_tx_ques; ahw->max_rx_ques = nic_info.max_rx_ques; ahw->capabilities = nic_info.capabilities; ahw->max_mac_filters = nic_info.max_mac_filters; ahw->max_mtu = nic_info.max_mtu; /* eSwitch capability indicates vNIC mode. * vNIC and SRIOV are mutually exclusive operational modes. * If SR-IOV capability is detected, SR-IOV physical function * will get initialized in default mode. * SR-IOV virtual function initialization follows a * different code path and opmode. * SRIOV mode has precedence over vNIC mode. */ if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state)) return QLC_83XX_DEFAULT_OPMODE; if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY) return QLCNIC_VNIC_MODE; return QLC_83XX_DEFAULT_OPMODE; } int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; u16 max_sds_rings, max_tx_rings; int ret; ret = qlcnic_83xx_get_nic_configuration(adapter); if (ret == -EIO) return -EIO; if (ret == QLCNIC_VNIC_MODE) { ahw->nic_mode = QLCNIC_VNIC_MODE; if (qlcnic_83xx_config_vnic_opmode(adapter)) return -EIO; max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS; max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS; } else if (ret == QLC_83XX_DEFAULT_OPMODE) { ahw->nic_mode = QLCNIC_DEFAULT_MODE; adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; max_sds_rings = QLCNIC_MAX_SDS_RINGS; max_tx_rings = QLCNIC_MAX_TX_RINGS; } else { dev_err(&adapter->pdev->dev, "%s: Invalid opmode %d\n", __func__, ret); return -EIO; } adapter->max_sds_rings = min(ahw->max_rx_ques, max_sds_rings); adapter->max_tx_rings = min(ahw->max_tx_ques, max_tx_rings); return 0; } static void qlcnic_83xx_config_buff_descriptors(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; if (ahw->port_type == QLCNIC_XGBE) { adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G; adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G; adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; } else if (ahw->port_type == QLCNIC_GBE) { adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G; adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G; } adapter->num_txd = MAX_CMD_DESCRIPTORS; adapter->max_rds_rings = MAX_RDS_RINGS; } static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter) { int err = -EIO; qlcnic_83xx_get_minidump_template(adapter); if (qlcnic_83xx_get_port_info(adapter)) return err; qlcnic_83xx_config_buff_descriptors(adapter); adapter->ahw->msix_supported = !!qlcnic_use_msi_x; adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; dev_info(&adapter->pdev->dev, "HAL Version: %d\n", adapter->ahw->fw_hal_version); return 0; } #define IS_QLC_83XX_USED(a, b, c) (((1 << a->portnum) & b) || ((c >> 6) & 0x1)) static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter) { struct qlcnic_cmd_args 
cmd; u32 presence_mask, audit_mask; int status; presence_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); audit_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT); if (IS_QLC_83XX_USED(adapter, presence_mask, audit_mask)) { status = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_STOP_NIC_FUNC); if (status) return; cmd.req.arg[1] = BIT_31; status = qlcnic_issue_cmd(adapter, &cmd); if (status) dev_err(&adapter->pdev->dev, "Failed to clean up the function resources\n"); qlcnic_free_mbx_args(&cmd); } } static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct pci_dev *pdev = adapter->pdev; struct qlc_83xx_fw_info *fw_info; int err = 0; ahw->fw_info = kzalloc(sizeof(*fw_info), GFP_KERNEL); if (!ahw->fw_info) { err = -ENOMEM; } else { fw_info = ahw->fw_info; switch (pdev->device) { case PCI_DEVICE_ID_QLOGIC_QLE834X: case PCI_DEVICE_ID_QLOGIC_QLE8830: strncpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME, QLC_FW_FILE_NAME_LEN); break; case PCI_DEVICE_ID_QLOGIC_QLE844X: strncpy(fw_info->fw_file_name, QLC_84XX_FW_FILE_NAME, QLC_FW_FILE_NAME_LEN); break; default: dev_err(&pdev->dev, "%s: Invalid device id\n", __func__); err = -EINVAL; break; } } return err; } static void qlcnic_83xx_init_rings(struct qlcnic_adapter *adapter) { u8 rx_cnt = QLCNIC_DEF_SDS_RINGS; u8 tx_cnt = QLCNIC_DEF_TX_RINGS; adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS; adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS; if (!adapter->ahw->msix_supported) { rx_cnt = QLCNIC_SINGLE_RING; tx_cnt = QLCNIC_SINGLE_RING; } /* compute and set drv sds rings */ qlcnic_set_tx_ring_count(adapter, tx_cnt); qlcnic_set_sds_ring_count(adapter, rx_cnt); } int qlcnic_83xx_init(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; int err = 0; adapter->rx_mac_learn = false; ahw->msix_supported = !!qlcnic_use_msi_x; /* Check if POST needs to be run */ switch (qlcnic_load_fw_file) { case 2: ahw->post_mode = QLC_83XX_POST_FAST_MODE; ahw->run_post = true; break; case 3: ahw->post_mode = QLC_83XX_POST_MEDIUM_MODE; ahw->run_post = true; break; case 4: ahw->post_mode = QLC_83XX_POST_SLOW_MODE; ahw->run_post = true; break; default: ahw->run_post = false; break; } qlcnic_83xx_init_rings(adapter); err = qlcnic_83xx_init_mailbox_work(adapter); if (err) goto exit; if (qlcnic_sriov_vf_check(adapter)) { err = qlcnic_sriov_vf_init(adapter); if (err) goto detach_mbx; else return err; } if (qlcnic_83xx_read_flash_descriptor_table(adapter) || qlcnic_83xx_read_flash_mfg_id(adapter)) { dev_err(&adapter->pdev->dev, "Failed reading flash mfg id\n"); err = -ENOTRECOVERABLE; goto detach_mbx; } err = qlcnic_83xx_check_hw_status(adapter); if (err) goto detach_mbx; err = qlcnic_83xx_get_fw_info(adapter); if (err) goto detach_mbx; err = qlcnic_83xx_idc_init(adapter); if (err) goto detach_mbx; err = qlcnic_setup_intr(adapter); if (err) { dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n"); goto disable_intr; } INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work); err = qlcnic_83xx_setup_mbx_intr(adapter); if (err) goto disable_mbx_intr; qlcnic_83xx_clear_function_resources(adapter); err = qlcnic_dcb_enable(adapter->dcb); if (err) { qlcnic_dcb_free(adapter->dcb); goto disable_mbx_intr; } qlcnic_83xx_initialize_nic(adapter, 1); qlcnic_dcb_get_info(adapter->dcb); /* Configure default, SR-IOV or Virtual NIC mode of operation */ err = qlcnic_83xx_configure_opmode(adapter); if (err) goto disable_mbx_intr; /* Perform operating mode specific initialization */ 
err = adapter->nic_ops->init_driver(adapter); if (err) goto disable_mbx_intr; /* Periodically monitor device status */ qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work); return 0; disable_mbx_intr: qlcnic_83xx_free_mbx_intr(adapter); disable_intr: qlcnic_teardown_intr(adapter); detach_mbx: qlcnic_83xx_detach_mailbox_work(adapter); qlcnic_83xx_free_mailbox(ahw->mailbox); ahw->mailbox = NULL; exit: return err; } void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlc_83xx_idc *idc = &ahw->idc; clear_bit(QLC_83XX_MBX_READY, &idc->status); cancel_delayed_work_sync(&adapter->fw_work); if (ahw->nic_mode == QLCNIC_VNIC_MODE) qlcnic_83xx_disable_vnic_mode(adapter, 1); qlcnic_83xx_idc_detach_driver(adapter); qlcnic_83xx_initialize_nic(adapter, 0); cancel_delayed_work_sync(&adapter->idc_aen_work); } int qlcnic_83xx_aer_reset(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlc_83xx_idc *idc = &ahw->idc; int ret = 0; u32 owner; /* Mark the previous IDC state as NEED_RESET so * that state_entry() will perform the reattachment * and bringup the device */ idc->prev_state = QLC_83XX_IDC_DEV_NEED_RESET; owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); if (ahw->pci_func == owner) { ret = qlcnic_83xx_restart_hw(adapter); if (ret < 0) return ret; qlcnic_83xx_idc_clear_registers(adapter, 0); } ret = idc->state_entry(adapter); return ret; } void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlc_83xx_idc *idc = &ahw->idc; u32 owner; idc->prev_state = QLC_83XX_IDC_DEV_READY; owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); if (ahw->pci_func == owner) qlcnic_83xx_idc_enter_ready_state(adapter, 0); qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, 0); }
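The HW stop/start/init path above is driven entirely by a flash-resident instruction template: qlcnic_83xx_exec_template_cmd() walks a list of variable-size entries, dispatches each opcode to a handler, and advances by the entry's own size field. The standalone sketch below models only that walk-and-dispatch pattern; the struct layout, opcode values and handlers are simplified stand-ins for illustration, not the driver's real qlc_83xx_entry_hdr or opcode set.

/* Illustrative sketch only -- a simplified, userspace model of the
 * template walk; types and opcodes here are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sketch_entry_hdr {	/* stand-in for struct qlc_83xx_entry_hdr */
	uint16_t cmd;		/* opcode selecting the handler */
	uint16_t size;		/* total bytes of this entry: header + payload */
};

enum { SKETCH_OP_NOP = 0, SKETCH_OP_WRITE = 1, SKETCH_OP_END = 255 };

static void sketch_exec_template(const uint8_t *buf, int entries)
{
	const uint8_t *entry = buf;
	int index, done = 0;

	for (index = 0; !done && index < entries; index++) {
		struct sketch_entry_hdr hdr;

		memcpy(&hdr, entry, sizeof(hdr));
		if (hdr.size == 0)	/* malformed entry, avoid looping forever */
			break;

		switch (hdr.cmd) {
		case SKETCH_OP_NOP:
			break;
		case SKETCH_OP_WRITE:
			printf("entry %d: write-list, %u payload bytes\n",
			       index, (unsigned)(hdr.size - sizeof(hdr)));
			break;
		case SKETCH_OP_END:	/* like QLC_83XX_OPCODE_TMPL_END */
			done = 1;
			break;
		default:
			printf("entry %d: unknown opcode 0x%x\n",
			       index, (unsigned)hdr.cmd);
			break;
		}
		/* stride by the per-entry size, exactly the driver's pattern */
		entry += hdr.size;
	}
}

int main(void)
{
	uint8_t tmpl[64];
	struct sketch_entry_hdr hdr;
	size_t off = 0;

	memset(tmpl, 0, sizeof(tmpl));

	/* one write entry with 4 bytes of payload, then a terminator */
	hdr.cmd = SKETCH_OP_WRITE;
	hdr.size = (uint16_t)(sizeof(hdr) + 4);
	memcpy(tmpl + off, &hdr, sizeof(hdr));
	off += hdr.size;

	hdr.cmd = SKETCH_OP_END;
	hdr.size = (uint16_t)sizeof(hdr);
	memcpy(tmpl + off, &hdr, sizeof(hdr));

	sketch_exec_template(tmpl, 16);
	return 0;
}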
linux-master
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
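Before the next file, a compact sketch of the IDC polling pattern implemented by qlcnic_83xx_idc_poll_dev_state() in the file above: read the device state, dispatch to a per-state handler, record the previous state, and re-arm the poll unless a terminal state was reached. The state names and handlers below are simplified stand-ins, and the delayed-work re-arming is only noted in a comment.

/* Illustrative sketch only -- a userspace model of the IDC poll loop,
 * not part of the driver. State names mirror the QLC_83XX_IDC_DEV_*
 * values in spirit; the handlers are placeholders.
 */
#include <stdio.h>

enum idc_state { IDC_UNKNOWN, IDC_COLD, IDC_INIT, IDC_READY,
		 IDC_NEED_RESET, IDC_FAILED };

struct idc_ctx {
	enum idc_state curr_state;
	enum idc_state prev_state;
	int module_loaded;
};

static enum idc_state read_dev_state(int step)
{
	/* stand-in for QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE) */
	static const enum idc_state sequence[] = { IDC_COLD, IDC_INIT,
						   IDC_READY, IDC_READY };
	return sequence[step % 4];
}

static void idc_poll_once(struct idc_ctx *idc, int step)
{
	idc->curr_state = read_dev_state(step);

	switch (idc->curr_state) {
	case IDC_READY:
		printf("ready: run periodic tasks\n");
		break;
	case IDC_NEED_RESET:
		printf("need reset: quiesce and ack\n");
		break;
	case IDC_FAILED:
		printf("failed: stop polling\n");
		idc->module_loaded = 0;	/* terminal state, do not re-arm */
		return;
	default:
		printf("state %d: transitional, wait for next poll\n",
		       (int)idc->curr_state);
		break;
	}

	idc->prev_state = idc->curr_state;
	/* the driver re-arms a delayed work item here (qlcnic_schedule_work) */
}

int main(void)
{
	struct idc_ctx idc = { IDC_UNKNOWN, IDC_UNKNOWN, 1 };
	int step;

	for (step = 0; step < 4 && idc.module_loaded; step++)
		idc_poll_once(&idc, step);
	return 0;
}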
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic qlcnic NIC Driver * Copyright (c) 2009-2013 QLogic Corporation */ #include <linux/types.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/io.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include "qlcnic.h" struct qlcnic_stats { char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; int stat_offset; }; #define QLC_SIZEOF(m) sizeof_field(struct qlcnic_adapter, m) #define QLC_OFF(m) offsetof(struct qlcnic_adapter, m) static const u32 qlcnic_fw_dump_level[] = { 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff }; static const struct qlcnic_stats qlcnic_gstrings_stats[] = { {"xmit_on", QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)}, {"xmit_off", QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)}, {"xmit_called", QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)}, {"xmit_finished", QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)}, {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error), QLC_OFF(stats.tx_dma_map_error)}, {"tx_bytes", QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)}, {"tx_dropped", QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)}, {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error), QLC_OFF(stats.rx_dma_map_error)}, {"rx_pkts", QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)}, {"rx_bytes", QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)}, {"rx_dropped", QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)}, {"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)}, {"csummed", QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)}, {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)}, {"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)}, {"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)}, {"encap_lso_frames", QLC_SIZEOF(stats.encap_lso_frames), QLC_OFF(stats.encap_lso_frames)}, {"encap_tx_csummed", QLC_SIZEOF(stats.encap_tx_csummed), QLC_OFF(stats.encap_tx_csummed)}, {"encap_rx_csummed", QLC_SIZEOF(stats.encap_rx_csummed), QLC_OFF(stats.encap_rx_csummed)}, {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure), QLC_OFF(stats.skb_alloc_failure)}, {"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun), QLC_OFF(stats.mac_filter_limit_overrun)}, {"spurious intr", QLC_SIZEOF(stats.spurious_intr), QLC_OFF(stats.spurious_intr)}, {"mbx spurious intr", QLC_SIZEOF(stats.mbx_spurious_intr), QLC_OFF(stats.mbx_spurious_intr)}, }; static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = { "tx unicast frames", "tx multicast frames", "tx broadcast frames", "tx dropped frames", "tx errors", "tx local frames", "tx numbytes", "rx unicast frames", "rx multicast frames", "rx broadcast frames", "rx dropped frames", "rx errors", "rx local frames", "rx numbytes", }; static const char qlcnic_83xx_tx_stats_strings[][ETH_GSTRING_LEN] = { "ctx_tx_bytes", "ctx_tx_pkts", "ctx_tx_errors", "ctx_tx_dropped_pkts", "ctx_tx_num_buffers", }; static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = { "mac_tx_frames", "mac_tx_bytes", "mac_tx_mcast_pkts", "mac_tx_bcast_pkts", "mac_tx_pause_cnt", "mac_tx_ctrl_pkt", "mac_tx_lt_64b_pkts", "mac_tx_lt_127b_pkts", "mac_tx_lt_255b_pkts", "mac_tx_lt_511b_pkts", "mac_tx_lt_1023b_pkts", "mac_tx_lt_1518b_pkts", "mac_tx_gt_1518b_pkts", "mac_rx_frames", "mac_rx_bytes", "mac_rx_mcast_pkts", "mac_rx_bcast_pkts", "mac_rx_pause_cnt", "mac_rx_ctrl_pkt", "mac_rx_lt_64b_pkts", "mac_rx_lt_127b_pkts", "mac_rx_lt_255b_pkts", "mac_rx_lt_511b_pkts", "mac_rx_lt_1023b_pkts", 
"mac_rx_lt_1518b_pkts", "mac_rx_gt_1518b_pkts", "mac_rx_length_error", "mac_rx_length_small", "mac_rx_length_large", "mac_rx_jabber", "mac_rx_dropped", "mac_crc_error", "mac_align_error", "eswitch_frames", "eswitch_bytes", "eswitch_multicast_frames", "eswitch_broadcast_frames", "eswitch_unicast_frames", "eswitch_error_free_frames", "eswitch_error_free_bytes", }; #define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats) static const char qlcnic_tx_queue_stats_strings[][ETH_GSTRING_LEN] = { "xmit_on", "xmit_off", "xmit_called", "xmit_finished", "tx_bytes", }; #define QLCNIC_TX_STATS_LEN ARRAY_SIZE(qlcnic_tx_queue_stats_strings) static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = { "ctx_rx_bytes", "ctx_rx_pkts", "ctx_lro_pkt_cnt", "ctx_ip_csum_error", "ctx_rx_pkts_wo_ctx", "ctx_rx_pkts_drop_wo_sds_on_card", "ctx_rx_pkts_drop_wo_sds_on_host", "ctx_rx_osized_pkts", "ctx_rx_pkts_dropped_wo_rds", "ctx_rx_unexpected_mcast_pkts", "ctx_invalid_mac_address", "ctx_rx_rds_ring_prim_attempted", "ctx_rx_rds_ring_prim_success", "ctx_num_lro_flows_added", "ctx_num_lro_flows_removed", "ctx_num_lro_flows_active", "ctx_pkts_dropped_unknown", }; static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { "Register_Test_on_offline", "Link_Test_on_offline", "Interrupt_Test_offline", "Internal_Loopback_offline", "External_Loopback_offline", "EEPROM_Test_offline" }; #define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test) static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter) { return ARRAY_SIZE(qlcnic_gstrings_stats) + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings; } static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter) { return ARRAY_SIZE(qlcnic_gstrings_stats) + ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) + QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings; } static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter) { int len = -1; if (qlcnic_82xx_check(adapter)) { len = qlcnic_82xx_statistics(adapter); if (adapter->flags & QLCNIC_ESWITCH_ENABLED) len += ARRAY_SIZE(qlcnic_device_gstrings_stats); } else if (qlcnic_83xx_check(adapter)) { len = qlcnic_83xx_statistics(adapter); } return len; } #define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412 #define QLCNIC_MAX_EEPROM_LEN 1024 static const u32 diag_registers[] = { QLCNIC_CMDPEG_STATE, QLCNIC_RCVPEG_STATE, QLCNIC_FW_CAPABILITIES, QLCNIC_CRB_DRV_ACTIVE, QLCNIC_CRB_DEV_STATE, QLCNIC_CRB_DRV_STATE, QLCNIC_CRB_DRV_SCRATCH, QLCNIC_CRB_DEV_PARTITION_INFO, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_PEG_ALIVE_COUNTER, QLCNIC_PEG_HALT_STATUS1, QLCNIC_PEG_HALT_STATUS2, -1 }; static const u32 ext_diag_registers[] = { CRB_XG_STATE_P3P, ISR_INT_STATE_REG, QLCNIC_CRB_PEG_NET_0+0x3c, QLCNIC_CRB_PEG_NET_1+0x3c, QLCNIC_CRB_PEG_NET_2+0x3c, QLCNIC_CRB_PEG_NET_4+0x3c, -1 }; #define QLCNIC_MGMT_API_VERSION 3 #define QLCNIC_ETHTOOL_REGS_VER 4 static inline int qlcnic_get_ring_regs_len(struct qlcnic_adapter *adapter) { int ring_regs_cnt = (adapter->drv_tx_rings * 5) + (adapter->max_rds_rings * 2) + (adapter->drv_sds_rings * 3) + 5; return ring_regs_cnt * sizeof(u32); } static int qlcnic_get_regs_len(struct net_device *dev) { struct qlcnic_adapter *adapter = netdev_priv(dev); u32 len; if (qlcnic_83xx_check(adapter)) len = qlcnic_83xx_get_regs_len(adapter); else len = sizeof(ext_diag_registers) + sizeof(diag_registers); len += ((QLCNIC_DEV_INFO_SIZE + 2) * sizeof(u32)); len += qlcnic_get_ring_regs_len(adapter); return 
len; } static int qlcnic_get_eeprom_len(struct net_device *dev) { return QLCNIC_FLASH_TOTAL_SIZE; } static void qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { struct qlcnic_adapter *adapter = netdev_priv(dev); u32 fw_major, fw_minor, fw_build; fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR); fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR); fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d", fw_major, fw_minor, fw_build); strscpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); strscpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver)); strscpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, sizeof(drvinfo->version)); } static int qlcnic_82xx_get_link_ksettings(struct qlcnic_adapter *adapter, struct ethtool_link_ksettings *ecmd) { struct qlcnic_hardware_context *ahw = adapter->ahw; u32 speed, reg; int check_sfp_module = 0, err = 0; u16 pcifn = ahw->pci_func; u32 supported, advertising; /* read which mode */ if (adapter->ahw->port_type == QLCNIC_GBE) { supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full); advertising = (ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); ecmd->base.speed = adapter->ahw->link_speed; ecmd->base.duplex = adapter->ahw->link_duplex; ecmd->base.autoneg = adapter->ahw->link_autoneg; } else if (adapter->ahw->port_type == QLCNIC_XGBE) { u32 val = 0; val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR, &err); if (val == QLCNIC_PORT_MODE_802_3_AP) { supported = SUPPORTED_1000baseT_Full; advertising = ADVERTISED_1000baseT_Full; } else { supported = SUPPORTED_10000baseT_Full; advertising = ADVERTISED_10000baseT_Full; } if (netif_running(adapter->netdev) && ahw->has_link_events) { if (ahw->linkup) { reg = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn), &err); speed = P3P_LINK_SPEED_VAL(pcifn, reg); ahw->link_speed = speed * P3P_LINK_SPEED_MHZ; } ecmd->base.speed = ahw->link_speed; ecmd->base.autoneg = ahw->link_autoneg; ecmd->base.duplex = ahw->link_duplex; goto skip; } ecmd->base.speed = SPEED_UNKNOWN; ecmd->base.duplex = DUPLEX_UNKNOWN; ecmd->base.autoneg = AUTONEG_DISABLE; } else return -EIO; skip: ecmd->base.phy_address = adapter->ahw->physical_port; switch (adapter->ahw->board_type) { case QLCNIC_BRDTYPE_P3P_REF_QG: case QLCNIC_BRDTYPE_P3P_4_GB: case QLCNIC_BRDTYPE_P3P_4_GB_MM: supported |= SUPPORTED_Autoneg; advertising |= ADVERTISED_Autoneg; fallthrough; case QLCNIC_BRDTYPE_P3P_10G_CX4: case QLCNIC_BRDTYPE_P3P_10G_CX4_LP: case QLCNIC_BRDTYPE_P3P_10000_BASE_T: supported |= SUPPORTED_TP; advertising |= ADVERTISED_TP; ecmd->base.port = PORT_TP; ecmd->base.autoneg = adapter->ahw->link_autoneg; break; case QLCNIC_BRDTYPE_P3P_IMEZ: case QLCNIC_BRDTYPE_P3P_XG_LOM: case QLCNIC_BRDTYPE_P3P_HMEZ: supported |= SUPPORTED_MII; advertising |= ADVERTISED_MII; ecmd->base.port = PORT_MII; ecmd->base.autoneg = AUTONEG_DISABLE; break; case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS: case QLCNIC_BRDTYPE_P3P_10G_SFP_CT: case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: advertising |= ADVERTISED_TP; supported |= SUPPORTED_TP; check_sfp_module = netif_running(adapter->netdev) && ahw->has_link_events; fallthrough; case QLCNIC_BRDTYPE_P3P_10G_XFP: supported |= SUPPORTED_FIBRE; advertising |= ADVERTISED_FIBRE; ecmd->base.port = PORT_FIBRE; ecmd->base.autoneg = AUTONEG_DISABLE; break; 
case QLCNIC_BRDTYPE_P3P_10G_TP: if (adapter->ahw->port_type == QLCNIC_XGBE) { ecmd->base.autoneg = AUTONEG_DISABLE; supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP); ecmd->base.port = PORT_FIBRE; check_sfp_module = netif_running(adapter->netdev) && ahw->has_link_events; } else { ecmd->base.autoneg = AUTONEG_ENABLE; supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); ecmd->base.port = PORT_TP; } break; default: dev_err(&adapter->pdev->dev, "Unsupported board model %d\n", adapter->ahw->board_type); return -EIO; } if (check_sfp_module) { switch (adapter->ahw->module_type) { case LINKEVENT_MODULE_OPTICAL_UNKNOWN: case LINKEVENT_MODULE_OPTICAL_SRLR: case LINKEVENT_MODULE_OPTICAL_LRM: case LINKEVENT_MODULE_OPTICAL_SFP_1G: ecmd->base.port = PORT_FIBRE; break; case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: case LINKEVENT_MODULE_TWINAX: ecmd->base.port = PORT_TP; break; default: ecmd->base.port = PORT_OTHER; } } ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising, advertising); return 0; } static int qlcnic_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *ecmd) { struct qlcnic_adapter *adapter = netdev_priv(dev); if (qlcnic_82xx_check(adapter)) return qlcnic_82xx_get_link_ksettings(adapter, ecmd); else if (qlcnic_83xx_check(adapter)) return qlcnic_83xx_get_link_ksettings(adapter, ecmd); return -EIO; } static int qlcnic_set_port_config(struct qlcnic_adapter *adapter, const struct ethtool_link_ksettings *ecmd) { u32 ret = 0, config = 0; /* read which mode */ if (ecmd->base.duplex) config |= 0x1; if (ecmd->base.autoneg) config |= 0x2; switch (ecmd->base.speed) { case SPEED_10: config |= (0 << 8); break; case SPEED_100: config |= (1 << 8); break; case SPEED_1000: config |= (10 << 8); break; default: return -EIO; } ret = qlcnic_fw_cmd_set_port(adapter, config); if (ret == QLCNIC_RCODE_NOT_SUPPORTED) return -EOPNOTSUPP; else if (ret) return -EIO; return ret; } static int qlcnic_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *ecmd) { u32 ret = 0; struct qlcnic_adapter *adapter = netdev_priv(dev); if (qlcnic_83xx_check(adapter)) qlcnic_83xx_get_port_type(adapter); if (adapter->ahw->port_type != QLCNIC_GBE) return -EOPNOTSUPP; if (qlcnic_83xx_check(adapter)) ret = qlcnic_83xx_set_link_ksettings(adapter, ecmd); else ret = qlcnic_set_port_config(adapter, ecmd); if (!ret) return ret; adapter->ahw->link_speed = ecmd->base.speed; adapter->ahw->link_duplex = ecmd->base.duplex; adapter->ahw->link_autoneg = ecmd->base.autoneg; if (!netif_running(dev)) return 0; dev->netdev_ops->ndo_stop(dev); return dev->netdev_ops->ndo_open(dev); } static int qlcnic_82xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff) { int i, j = 0, err = 0; for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++) regs_buff[i] = QLC_SHARED_REG_RD32(adapter, diag_registers[j]); j = 0; while (ext_diag_registers[j] != -1) regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++], &err); return i; } static void qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { struct qlcnic_adapter *adapter = netdev_priv(dev); struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_host_rds_ring *rds_rings; struct qlcnic_host_tx_ring *tx_ring; u32 *regs_buff = 
p; int ring, i = 0; memset(p, 0, qlcnic_get_regs_len(dev)); regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) | (adapter->ahw->revision_id << 16) | (adapter->pdev)->device; regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff)); regs_buff[1] = QLCNIC_MGMT_API_VERSION; if (adapter->ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY) regs_buff[2] = adapter->ahw->max_vnic_func; if (qlcnic_82xx_check(adapter)) i = qlcnic_82xx_get_registers(adapter, regs_buff); else i = qlcnic_83xx_get_registers(adapter, regs_buff); if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) return; /* Marker btw regs and TX ring count */ regs_buff[i++] = 0xFFEFCDAB; regs_buff[i++] = adapter->drv_tx_rings; /* No. of TX ring */ for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; regs_buff[i++] = le32_to_cpu(*(tx_ring->hw_consumer)); regs_buff[i++] = tx_ring->sw_consumer; regs_buff[i++] = readl(tx_ring->crb_cmd_producer); regs_buff[i++] = tx_ring->producer; if (tx_ring->crb_intr_mask) regs_buff[i++] = readl(tx_ring->crb_intr_mask); else regs_buff[i++] = QLCNIC_TX_INTR_NOT_CONFIGURED; } regs_buff[i++] = adapter->max_rds_rings; /* No. of RX ring */ for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_rings = &recv_ctx->rds_rings[ring]; regs_buff[i++] = readl(rds_rings->crb_rcv_producer); regs_buff[i++] = rds_rings->producer; } regs_buff[i++] = adapter->drv_sds_rings; /* No. of SDS ring */ for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &(recv_ctx->sds_rings[ring]); regs_buff[i++] = readl(sds_ring->crb_sts_consumer); regs_buff[i++] = sds_ring->consumer; regs_buff[i++] = readl(sds_ring->crb_intr_mask); } } static u32 qlcnic_test_link(struct net_device *dev) { struct qlcnic_adapter *adapter = netdev_priv(dev); int err = 0; u32 val; if (qlcnic_83xx_check(adapter)) { val = qlcnic_83xx_test_link(adapter); return (val & 1) ? 0 : 1; } val = QLCRD32(adapter, CRB_XG_STATE_P3P, &err); if (err == -EIO) return err; val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val); return (val == XG_LINK_UP_P3P) ? 
0 : 1; } static int qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *bytes) { struct qlcnic_adapter *adapter = netdev_priv(dev); int offset; int ret = -1; if (qlcnic_83xx_check(adapter)) return 0; if (eeprom->len == 0) return -EINVAL; eeprom->magic = (adapter->pdev)->vendor | ((adapter->pdev)->device << 16); offset = eeprom->offset; if (qlcnic_82xx_check(adapter)) ret = qlcnic_rom_fast_read_words(adapter, offset, bytes, eeprom->len); if (ret < 0) return ret; return 0; } static void qlcnic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct qlcnic_adapter *adapter = netdev_priv(dev); ring->rx_pending = adapter->num_rxd; ring->rx_jumbo_pending = adapter->num_jumbo_rxd; ring->tx_pending = adapter->num_txd; ring->rx_max_pending = adapter->max_rxd; ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd; ring->tx_max_pending = MAX_CMD_DESCRIPTORS; } static u32 qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name) { u32 num_desc; num_desc = max(val, min); num_desc = min(num_desc, max); num_desc = roundup_pow_of_two(num_desc); if (val != num_desc) { printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n", qlcnic_driver_name, r_name, num_desc, val); } return num_desc; } static int qlcnic_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct qlcnic_adapter *adapter = netdev_priv(dev); u16 num_rxd, num_jumbo_rxd, num_txd; if (ring->rx_mini_pending) return -EOPNOTSUPP; num_rxd = qlcnic_validate_ringparam(ring->rx_pending, MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx"); num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending, MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd, "rx jumbo"); num_txd = qlcnic_validate_ringparam(ring->tx_pending, MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx"); if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd && num_jumbo_rxd == adapter->num_jumbo_rxd) return 0; adapter->num_rxd = num_rxd; adapter->num_jumbo_rxd = num_jumbo_rxd; adapter->num_txd = num_txd; return qlcnic_reset_context(adapter); } static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter, u8 rx_ring, u8 tx_ring) { if (rx_ring == 0 || tx_ring == 0) return -EINVAL; if (rx_ring != 0) { if (rx_ring > adapter->max_sds_rings) { netdev_err(adapter->netdev, "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n", rx_ring, adapter->max_sds_rings); return -EINVAL; } } if (tx_ring != 0) { if (tx_ring > adapter->max_tx_rings) { netdev_err(adapter->netdev, "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n", tx_ring, adapter->max_tx_rings); return -EINVAL; } } return 0; } static void qlcnic_get_channels(struct net_device *dev, struct ethtool_channels *channel) { struct qlcnic_adapter *adapter = netdev_priv(dev); channel->max_rx = adapter->max_sds_rings; channel->max_tx = adapter->max_tx_rings; channel->rx_count = adapter->drv_sds_rings; channel->tx_count = adapter->drv_tx_rings; } static int qlcnic_set_channels(struct net_device *dev, struct ethtool_channels *channel) { struct qlcnic_adapter *adapter = netdev_priv(dev); int err; if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { netdev_err(dev, "No RSS/TSS support in non MSI-X mode\n"); return -EINVAL; } if (channel->other_count || channel->combined_count) return -EINVAL; err = qlcnic_validate_ring_count(adapter, 
channel->rx_count, channel->tx_count); if (err) return err; if (adapter->drv_sds_rings != channel->rx_count) { err = qlcnic_validate_rings(adapter, channel->rx_count, QLCNIC_RX_QUEUE); if (err) { netdev_err(dev, "Unable to configure %u SDS rings\n", channel->rx_count); return err; } adapter->drv_rss_rings = channel->rx_count; } if (adapter->drv_tx_rings != channel->tx_count) { err = qlcnic_validate_rings(adapter, channel->tx_count, QLCNIC_TX_QUEUE); if (err) { netdev_err(dev, "Unable to configure %u Tx rings\n", channel->tx_count); return err; } adapter->drv_tss_rings = channel->tx_count; } adapter->flags |= QLCNIC_TSS_RSS; err = qlcnic_setup_rings(adapter); netdev_info(dev, "Allocated %d SDS rings and %d Tx rings\n", adapter->drv_sds_rings, adapter->drv_tx_rings); return err; } static void qlcnic_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int port = adapter->ahw->physical_port; int err = 0; __u32 val; if (qlcnic_83xx_check(adapter)) { qlcnic_83xx_get_pauseparam(adapter, pause); return; } if (adapter->ahw->port_type == QLCNIC_GBE) { if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) return; /* get flow control settings */ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err); if (err == -EIO) return; pause->rx_pause = qlcnic_gb_get_rx_flowctl(val); val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err); if (err == -EIO) return; switch (port) { case 0: pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val)); break; case 1: pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val)); break; case 2: pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val)); break; case 3: default: pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val)); break; } } else if (adapter->ahw->port_type == QLCNIC_XGBE) { if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS)) return; pause->rx_pause = 1; val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err); if (err == -EIO) return; if (port == 0) pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val)); else pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val)); } else { dev_err(&netdev->dev, "Unknown board type: %x\n", adapter->ahw->port_type); } } static int qlcnic_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int port = adapter->ahw->physical_port; int err = 0; __u32 val; if (qlcnic_83xx_check(adapter)) return qlcnic_83xx_set_pauseparam(adapter, pause); /* read mode */ if (adapter->ahw->port_type == QLCNIC_GBE) { if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) return -EIO; /* set flow control */ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err); if (err == -EIO) return err; if (pause->rx_pause) qlcnic_gb_rx_flowctl(val); else qlcnic_gb_unset_rx_flowctl(val); QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val); QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val); /* set autoneg */ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err); if (err == -EIO) return err; switch (port) { case 0: if (pause->tx_pause) qlcnic_gb_unset_gb0_mask(val); else qlcnic_gb_set_gb0_mask(val); break; case 1: if (pause->tx_pause) qlcnic_gb_unset_gb1_mask(val); else qlcnic_gb_set_gb1_mask(val); break; case 2: if (pause->tx_pause) qlcnic_gb_unset_gb2_mask(val); else qlcnic_gb_set_gb2_mask(val); break; case 3: default: if (pause->tx_pause) qlcnic_gb_unset_gb3_mask(val); else qlcnic_gb_set_gb3_mask(val); break; } QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val); } else if (adapter->ahw->port_type == QLCNIC_XGBE) { if (!pause->rx_pause 
|| pause->autoneg) return -EOPNOTSUPP; if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS)) return -EIO; val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err); if (err == -EIO) return err; if (port == 0) { if (pause->tx_pause) qlcnic_xg_unset_xg0_mask(val); else qlcnic_xg_set_xg0_mask(val); } else { if (pause->tx_pause) qlcnic_xg_unset_xg1_mask(val); else qlcnic_xg_set_xg1_mask(val); } QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val); } else { dev_err(&netdev->dev, "Unknown board type: %x\n", adapter->ahw->port_type); } return 0; } static int qlcnic_reg_test(struct net_device *dev) { struct qlcnic_adapter *adapter = netdev_priv(dev); u32 data_read; int err = 0; if (qlcnic_83xx_check(adapter)) return qlcnic_83xx_reg_test(adapter); data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0), &err); if (err == -EIO) return err; if ((data_read & 0xffff) != adapter->pdev->vendor) return 1; return 0; } static int qlcnic_eeprom_test(struct net_device *dev) { struct qlcnic_adapter *adapter = netdev_priv(dev); if (qlcnic_82xx_check(adapter)) return 0; return qlcnic_83xx_flash_test(adapter); } static int qlcnic_get_sset_count(struct net_device *dev, int sset) { struct qlcnic_adapter *adapter = netdev_priv(dev); switch (sset) { case ETH_SS_TEST: return QLCNIC_TEST_LEN; case ETH_SS_STATS: return qlcnic_dev_statistics_len(adapter); default: return -EOPNOTSUPP; } } static int qlcnic_irq_test(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_cmd_args cmd; int ret, drv_sds_rings = adapter->drv_sds_rings; int drv_tx_rings = adapter->drv_tx_rings; if (qlcnic_83xx_check(adapter)) return qlcnic_83xx_interrupt_test(netdev); if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) return -EIO; ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST); if (ret) goto clear_diag_irq; ahw->diag_cnt = 0; ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); if (ret) goto free_diag_res; cmd.req.arg[1] = ahw->pci_func; ret = qlcnic_issue_cmd(adapter, &cmd); if (ret) goto done; usleep_range(1000, 12000); ret = !ahw->diag_cnt; done: qlcnic_free_mbx_args(&cmd); free_diag_res: qlcnic_diag_free_res(netdev, drv_sds_rings); clear_diag_irq: adapter->drv_sds_rings = drv_sds_rings; adapter->drv_tx_rings = drv_tx_rings; clear_bit(__QLCNIC_RESETTING, &adapter->state); return ret; } #define QLCNIC_ILB_PKT_SIZE 64 #define QLCNIC_NUM_ILB_PKT 16 #define QLCNIC_ILB_MAX_RCV_LOOP 10 #define QLCNIC_LB_PKT_POLL_DELAY_MSEC 1 #define QLCNIC_LB_PKT_POLL_COUNT 20 static void qlcnic_create_loopback_buff(unsigned char *data, u8 mac[]) { static const unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00}; memset(data, 0x4e, QLCNIC_ILB_PKT_SIZE); memcpy(data, mac, ETH_ALEN); memcpy(data + ETH_ALEN, mac, ETH_ALEN); memcpy(data + 2 * ETH_ALEN, random_data, sizeof(random_data)); } int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]) { unsigned char buff[QLCNIC_ILB_PKT_SIZE]; qlcnic_create_loopback_buff(buff, mac); return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE); } int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode) { struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0]; struct sk_buff *skb; int i, loop, cnt = 0; for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) { skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE); if (!skb) goto error; qlcnic_create_loopback_buff(skb->data, adapter->mac_addr); skb_put(skb, QLCNIC_ILB_PKT_SIZE); adapter->ahw->diag_cnt = 0; 
qlcnic_xmit_frame(skb, adapter->netdev); loop = 0; do { msleep(QLCNIC_LB_PKT_POLL_DELAY_MSEC); qlcnic_process_rcv_ring_diag(sds_ring); if (loop++ > QLCNIC_LB_PKT_POLL_COUNT) break; } while (!adapter->ahw->diag_cnt); dev_kfree_skb_any(skb); if (!adapter->ahw->diag_cnt) dev_warn(&adapter->pdev->dev, "LB Test: packet #%d was not received\n", i + 1); else cnt++; } if (cnt != i) { error: dev_err(&adapter->pdev->dev, "LB Test: failed, TX[%d], RX[%d]\n", i, cnt); if (mode != QLCNIC_ILB_MODE) dev_warn(&adapter->pdev->dev, "WARNING: Please check loopback cable\n"); return -1; } return 0; } static int qlcnic_loopback_test(struct net_device *netdev, u8 mode) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int drv_tx_rings = adapter->drv_tx_rings; int drv_sds_rings = adapter->drv_sds_rings; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_hardware_context *ahw = adapter->ahw; int loop = 0; int ret; if (qlcnic_83xx_check(adapter)) return qlcnic_83xx_loopback_test(netdev, mode); if (!(ahw->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) { dev_info(&adapter->pdev->dev, "Firmware do not support loopback test\n"); return -EOPNOTSUPP; } dev_warn(&adapter->pdev->dev, "%s loopback test in progress\n", mode == QLCNIC_ILB_MODE ? "internal" : "external"); if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { dev_warn(&adapter->pdev->dev, "Loopback test not supported in nonprivileged mode\n"); return 0; } if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) return -EBUSY; ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST); if (ret) goto clear_it; sds_ring = &adapter->recv_ctx->sds_rings[0]; ret = qlcnic_set_lb_mode(adapter, mode); if (ret) goto free_res; ahw->diag_cnt = 0; do { msleep(500); qlcnic_process_rcv_ring_diag(sds_ring); if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { netdev_info(netdev, "Firmware didn't sent link up event to loopback request\n"); ret = -ETIMEDOUT; goto free_res; } else if (adapter->ahw->diag_cnt) { ret = adapter->ahw->diag_cnt; goto free_res; } } while (!QLCNIC_IS_LB_CONFIGURED(ahw->loopback_state)); ret = qlcnic_do_lb_test(adapter, mode); qlcnic_clear_lb_mode(adapter, mode); free_res: qlcnic_diag_free_res(netdev, drv_sds_rings); clear_it: adapter->drv_sds_rings = drv_sds_rings; adapter->drv_tx_rings = drv_tx_rings; clear_bit(__QLCNIC_RESETTING, &adapter->state); return ret; } static void qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test, u64 *data) { memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN); data[0] = qlcnic_reg_test(dev); if (data[0]) eth_test->flags |= ETH_TEST_FL_FAILED; data[1] = (u64) qlcnic_test_link(dev); if (data[1]) eth_test->flags |= ETH_TEST_FL_FAILED; if (eth_test->flags & ETH_TEST_FL_OFFLINE) { data[2] = qlcnic_irq_test(dev); if (data[2]) eth_test->flags |= ETH_TEST_FL_FAILED; data[3] = qlcnic_loopback_test(dev, QLCNIC_ILB_MODE); if (data[3]) eth_test->flags |= ETH_TEST_FL_FAILED; if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) { data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE); if (data[4]) eth_test->flags |= ETH_TEST_FL_FAILED; eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; } data[5] = qlcnic_eeprom_test(dev); if (data[5]) eth_test->flags |= ETH_TEST_FL_FAILED; } } static void qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) { struct qlcnic_adapter *adapter = netdev_priv(dev); int index, i, num_stats; switch (stringset) { case ETH_SS_TEST: memcpy(data, *qlcnic_gstrings_test, QLCNIC_TEST_LEN * ETH_GSTRING_LEN); break; case ETH_SS_STATS: num_stats = ARRAY_SIZE(qlcnic_tx_queue_stats_strings); for (i = 0; i < 
adapter->drv_tx_rings; i++) { for (index = 0; index < num_stats; index++) { sprintf(data, "tx_queue_%d %s", i, qlcnic_tx_queue_stats_strings[index]); data += ETH_GSTRING_LEN; } } for (index = 0; index < QLCNIC_STATS_LEN; index++) { memcpy(data + index * ETH_GSTRING_LEN, qlcnic_gstrings_stats[index].stat_string, ETH_GSTRING_LEN); } if (qlcnic_83xx_check(adapter)) { num_stats = ARRAY_SIZE(qlcnic_83xx_tx_stats_strings); for (i = 0; i < num_stats; i++, index++) memcpy(data + index * ETH_GSTRING_LEN, qlcnic_83xx_tx_stats_strings[i], ETH_GSTRING_LEN); num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); for (i = 0; i < num_stats; i++, index++) memcpy(data + index * ETH_GSTRING_LEN, qlcnic_83xx_mac_stats_strings[i], ETH_GSTRING_LEN); num_stats = ARRAY_SIZE(qlcnic_83xx_rx_stats_strings); for (i = 0; i < num_stats; i++, index++) memcpy(data + index * ETH_GSTRING_LEN, qlcnic_83xx_rx_stats_strings[i], ETH_GSTRING_LEN); return; } else { num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); for (i = 0; i < num_stats; i++, index++) memcpy(data + index * ETH_GSTRING_LEN, qlcnic_83xx_mac_stats_strings[i], ETH_GSTRING_LEN); } if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) return; num_stats = ARRAY_SIZE(qlcnic_device_gstrings_stats); for (i = 0; i < num_stats; index++, i++) { memcpy(data + index * ETH_GSTRING_LEN, qlcnic_device_gstrings_stats[i], ETH_GSTRING_LEN); } } } static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type) { if (type == QLCNIC_MAC_STATS) { struct qlcnic_mac_statistics *mac_stats = (struct qlcnic_mac_statistics *)stats; *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_frames); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bytes); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_mcast_pkts); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bcast_pkts); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_pause_cnt); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_ctrl_pkt); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_64b_pkts); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_127b_pkts); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_255b_pkts); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_511b_pkts); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1023b_pkts); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1518b_pkts); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_gt_1518b_pkts); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_frames); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bytes); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_mcast_pkts); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bcast_pkts); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_pause_cnt); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_ctrl_pkt); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_64b_pkts); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_127b_pkts); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_255b_pkts); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_511b_pkts); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1023b_pkts); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1518b_pkts); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_gt_1518b_pkts); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_error); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_small); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_large); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_jabber); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_dropped); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_crc_error); *data++ = QLCNIC_FILL_STATS(mac_stats->mac_align_error); } else if (type == QLCNIC_ESW_STATS) { struct 
__qlcnic_esw_statistics *esw_stats = (struct __qlcnic_esw_statistics *)stats; *data++ = QLCNIC_FILL_STATS(esw_stats->unicast_frames); *data++ = QLCNIC_FILL_STATS(esw_stats->multicast_frames); *data++ = QLCNIC_FILL_STATS(esw_stats->broadcast_frames); *data++ = QLCNIC_FILL_STATS(esw_stats->dropped_frames); *data++ = QLCNIC_FILL_STATS(esw_stats->errors); *data++ = QLCNIC_FILL_STATS(esw_stats->local_frames); *data++ = QLCNIC_FILL_STATS(esw_stats->numbytes); } return data; } void qlcnic_update_stats(struct qlcnic_adapter *adapter) { struct qlcnic_tx_queue_stats tx_stats; struct qlcnic_host_tx_ring *tx_ring; int ring; memset(&tx_stats, 0, sizeof(tx_stats)); for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; tx_stats.xmit_on += tx_ring->tx_stats.xmit_on; tx_stats.xmit_off += tx_ring->tx_stats.xmit_off; tx_stats.xmit_called += tx_ring->tx_stats.xmit_called; tx_stats.xmit_finished += tx_ring->tx_stats.xmit_finished; tx_stats.tx_bytes += tx_ring->tx_stats.tx_bytes; } adapter->stats.xmit_on = tx_stats.xmit_on; adapter->stats.xmit_off = tx_stats.xmit_off; adapter->stats.xmitcalled = tx_stats.xmit_called; adapter->stats.xmitfinished = tx_stats.xmit_finished; adapter->stats.txbytes = tx_stats.tx_bytes; } static u64 *qlcnic_fill_tx_queue_stats(u64 *data, void *stats) { struct qlcnic_host_tx_ring *tx_ring; tx_ring = (struct qlcnic_host_tx_ring *)stats; *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_on); *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_off); *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_called); *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_finished); *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.tx_bytes); return data; } static void qlcnic_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct qlcnic_adapter *adapter = netdev_priv(dev); struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_esw_statistics port_stats; struct qlcnic_mac_statistics mac_stats; int index, ret, length, size, ring; char *p; memset(data, 0, stats->n_stats * sizeof(u64)); for (ring = 0; ring < adapter->drv_tx_rings; ring++) { if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) { tx_ring = &adapter->tx_ring[ring]; data = qlcnic_fill_tx_queue_stats(data, tx_ring); qlcnic_update_stats(adapter); } else { data += QLCNIC_TX_STATS_LEN; } } length = QLCNIC_STATS_LEN; for (index = 0; index < length; index++) { p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset; size = qlcnic_gstrings_stats[index].sizeof_stat; *data++ = (size == sizeof(u64)) ? 
(*(u64 *)p) : ((*(u32 *)p)); } if (qlcnic_83xx_check(adapter)) { if (adapter->ahw->linkup) qlcnic_83xx_get_stats(adapter, data); return; } else { /* Retrieve MAC statistics from firmware */ memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics)); qlcnic_get_mac_stats(adapter, &mac_stats); data = qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS); } if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) return; memset(&port_stats, 0, sizeof(struct qlcnic_esw_statistics)); ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func, QLCNIC_QUERY_RX_COUNTER, &port_stats.rx); if (ret) return; data = qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS); ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func, QLCNIC_QUERY_TX_COUNTER, &port_stats.tx); if (ret) return; qlcnic_fill_stats(data, &port_stats.tx, QLCNIC_ESW_STATS); } static int qlcnic_set_led(struct net_device *dev, enum ethtool_phys_id_state state) { struct qlcnic_adapter *adapter = netdev_priv(dev); int drv_sds_rings = adapter->drv_sds_rings; int err = -EIO, active = 1; if (qlcnic_83xx_check(adapter)) return qlcnic_83xx_set_led(dev, state); if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { netdev_warn(dev, "LED test not supported for non " "privilege function\n"); return -EOPNOTSUPP; } switch (state) { case ETHTOOL_ID_ACTIVE: if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) return -EBUSY; if (test_bit(__QLCNIC_RESETTING, &adapter->state)) break; if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST)) break; set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state); } if (adapter->nic_ops->config_led(adapter, 1, 0xf) == 0) { err = 0; break; } dev_err(&adapter->pdev->dev, "Failed to set LED blink state.\n"); break; case ETHTOOL_ID_INACTIVE: active = 0; if (test_bit(__QLCNIC_RESETTING, &adapter->state)) break; if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST)) break; set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state); } if (adapter->nic_ops->config_led(adapter, 0, 0xf)) dev_err(&adapter->pdev->dev, "Failed to reset LED blink state.\n"); break; default: return -EINVAL; } if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) qlcnic_diag_free_res(dev, drv_sds_rings); if (!active || err) clear_bit(__QLCNIC_LED_ENABLE, &adapter->state); return err; } static void qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct qlcnic_adapter *adapter = netdev_priv(dev); u32 wol_cfg; int err = 0; if (qlcnic_83xx_check(adapter)) return; wol->supported = 0; wol->wolopts = 0; wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err); if (err == -EIO) return; if (wol_cfg & (1UL << adapter->portnum)) wol->supported |= WAKE_MAGIC; wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err); if (wol_cfg & (1UL << adapter->portnum)) wol->wolopts |= WAKE_MAGIC; } static int qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct qlcnic_adapter *adapter = netdev_priv(dev); u32 wol_cfg; int err = 0; if (qlcnic_83xx_check(adapter)) return -EOPNOTSUPP; if (wol->wolopts & ~WAKE_MAGIC) return -EINVAL; wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err); if (err == -EIO) return err; if (!(wol_cfg & (1 << adapter->portnum))) return -EOPNOTSUPP; wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err); if (err == -EIO) return err; if (wol->wolopts & WAKE_MAGIC) wol_cfg |= 1UL << adapter->portnum; else wol_cfg &= ~(1UL << adapter->portnum); QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg); return 0; } /* * Set the coalescing 
parameters. Currently only normal is supported. * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the * firmware coalescing to default. */ static int qlcnic_set_intr_coalesce(struct net_device *netdev, struct ethtool_coalesce *ethcoal, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int err; if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) return -EINVAL; /* * Return Error if unsupported values or * unsupported parameters are set. */ if (ethcoal->rx_coalesce_usecs > 0xffff || ethcoal->rx_max_coalesced_frames > 0xffff || ethcoal->tx_coalesce_usecs > 0xffff || ethcoal->tx_max_coalesced_frames > 0xffff) return -EINVAL; err = qlcnic_config_intr_coalesce(adapter, ethcoal); return err; } static int qlcnic_get_intr_coalesce(struct net_device *netdev, struct ethtool_coalesce *ethcoal, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct qlcnic_adapter *adapter = netdev_priv(netdev); if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) return -EINVAL; ethcoal->rx_coalesce_usecs = adapter->ahw->coal.rx_time_us; ethcoal->rx_max_coalesced_frames = adapter->ahw->coal.rx_packets; ethcoal->tx_coalesce_usecs = adapter->ahw->coal.tx_time_us; ethcoal->tx_max_coalesced_frames = adapter->ahw->coal.tx_packets; return 0; } static u32 qlcnic_get_msglevel(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); return adapter->ahw->msg_enable; } static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl) { struct qlcnic_adapter *adapter = netdev_priv(netdev); adapter->ahw->msg_enable = msglvl; } int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *adapter) { struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; u32 val; if (qlcnic_84xx_check(adapter)) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); val &= ~QLC_83XX_IDC_DISABLE_FW_DUMP; QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); qlcnic_83xx_unlock_driver(adapter); } else { fw_dump->enable = true; } dev_info(&adapter->pdev->dev, "FW dump enabled\n"); return 0; } static int qlcnic_disable_fw_dump_state(struct qlcnic_adapter *adapter) { struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; u32 val; if (qlcnic_84xx_check(adapter)) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); val |= QLC_83XX_IDC_DISABLE_FW_DUMP; QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); qlcnic_83xx_unlock_driver(adapter); } else { fw_dump->enable = false; } dev_info(&adapter->pdev->dev, "FW dump disabled\n"); return 0; } bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *adapter) { struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; bool state; u32 val; if (qlcnic_84xx_check(adapter)) { val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); state = (val & QLC_83XX_IDC_DISABLE_FW_DUMP) ? 
false : true; } else { state = fw_dump->enable; } return state; } static int qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; if (!fw_dump->tmpl_hdr) { netdev_err(adapter->netdev, "FW Dump not supported\n"); return -ENOTSUPP; } if (fw_dump->clr) dump->len = fw_dump->tmpl_hdr_size + fw_dump->size; else dump->len = 0; if (!qlcnic_check_fw_dump_state(adapter)) dump->flag = ETH_FW_DUMP_DISABLE; else dump->flag = fw_dump->cap_mask; dump->version = adapter->fw_version; return 0; } static int qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, void *buffer) { int i, copy_sz; u32 *hdr_ptr; __le32 *data; struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; if (!fw_dump->tmpl_hdr) { netdev_err(netdev, "FW Dump not supported\n"); return -ENOTSUPP; } if (!fw_dump->clr) { netdev_info(netdev, "Dump not available\n"); return -EINVAL; } /* Copy template header first */ copy_sz = fw_dump->tmpl_hdr_size; hdr_ptr = (u32 *)fw_dump->tmpl_hdr; data = buffer; for (i = 0; i < copy_sz/sizeof(u32); i++) *data++ = cpu_to_le32(*hdr_ptr++); /* Copy captured dump data */ memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size); dump->len = copy_sz + fw_dump->size; dump->flag = fw_dump->cap_mask; /* Free dump area once data has been captured */ vfree(fw_dump->data); fw_dump->data = NULL; fw_dump->clr = 0; netdev_info(netdev, "extracted the FW dump Successfully\n"); return 0; } static int qlcnic_set_dump_mask(struct qlcnic_adapter *adapter, u32 mask) { struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; struct net_device *netdev = adapter->netdev; if (!qlcnic_check_fw_dump_state(adapter)) { netdev_info(netdev, "Can not change driver mask to 0x%x. 
FW dump not enabled\n", mask); return -EOPNOTSUPP; } fw_dump->cap_mask = mask; /* Store new capture mask in template header as well*/ qlcnic_store_cap_mask(adapter, fw_dump->tmpl_hdr, mask); netdev_info(netdev, "Driver mask changed to: 0x%x\n", mask); return 0; } static int qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; bool valid_mask = false; int i, ret = 0; switch (val->flag) { case QLCNIC_FORCE_FW_DUMP_KEY: if (!fw_dump->tmpl_hdr) { netdev_err(netdev, "FW dump not supported\n"); ret = -EOPNOTSUPP; break; } if (!qlcnic_check_fw_dump_state(adapter)) { netdev_info(netdev, "FW dump not enabled\n"); ret = -EOPNOTSUPP; break; } if (fw_dump->clr) { netdev_info(netdev, "Previous dump not cleared, not forcing dump\n"); break; } netdev_info(netdev, "Forcing a FW dump\n"); qlcnic_dev_request_reset(adapter, val->flag); break; case QLCNIC_DISABLE_FW_DUMP: if (!fw_dump->tmpl_hdr) { netdev_err(netdev, "FW dump not supported\n"); ret = -EOPNOTSUPP; break; } ret = qlcnic_disable_fw_dump_state(adapter); break; case QLCNIC_ENABLE_FW_DUMP: if (!fw_dump->tmpl_hdr) { netdev_err(netdev, "FW dump not supported\n"); ret = -EOPNOTSUPP; break; } ret = qlcnic_enable_fw_dump_state(adapter); break; case QLCNIC_FORCE_FW_RESET: netdev_info(netdev, "Forcing a FW reset\n"); qlcnic_dev_request_reset(adapter, val->flag); adapter->flags &= ~QLCNIC_FW_RESET_OWNER; break; case QLCNIC_SET_QUIESCENT: case QLCNIC_RESET_QUIESCENT: if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) netdev_info(netdev, "Device is in non-operational state\n"); break; default: if (!fw_dump->tmpl_hdr) { netdev_err(netdev, "FW dump not supported\n"); ret = -EOPNOTSUPP; break; } for (i = 0; i < ARRAY_SIZE(qlcnic_fw_dump_level); i++) { if (val->flag == qlcnic_fw_dump_level[i]) { valid_mask = true; break; } } if (valid_mask) { ret = qlcnic_set_dump_mask(adapter, val->flag); } else { netdev_info(netdev, "Invalid dump level: 0x%x\n", val->flag); ret = -EINVAL; } } return ret; } const struct ethtool_ops qlcnic_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES, .get_drvinfo = qlcnic_get_drvinfo, .get_regs_len = qlcnic_get_regs_len, .get_regs = qlcnic_get_regs, .get_link = ethtool_op_get_link, .get_eeprom_len = qlcnic_get_eeprom_len, .get_eeprom = qlcnic_get_eeprom, .get_ringparam = qlcnic_get_ringparam, .set_ringparam = qlcnic_set_ringparam, .get_channels = qlcnic_get_channels, .set_channels = qlcnic_set_channels, .get_pauseparam = qlcnic_get_pauseparam, .set_pauseparam = qlcnic_set_pauseparam, .get_wol = qlcnic_get_wol, .set_wol = qlcnic_set_wol, .self_test = qlcnic_diag_test, .get_strings = qlcnic_get_strings, .get_ethtool_stats = qlcnic_get_ethtool_stats, .get_sset_count = qlcnic_get_sset_count, .get_coalesce = qlcnic_get_intr_coalesce, .set_coalesce = qlcnic_set_intr_coalesce, .set_phys_id = qlcnic_set_led, .set_msglevel = qlcnic_set_msglevel, .get_msglevel = qlcnic_get_msglevel, .get_dump_flag = qlcnic_get_dump_flag, .get_dump_data = qlcnic_get_dump_data, .set_dump = qlcnic_set_dump, .get_link_ksettings = qlcnic_get_link_ksettings, .set_link_ksettings = qlcnic_set_link_ksettings, }; const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES, .get_drvinfo = qlcnic_get_drvinfo, .get_regs_len = qlcnic_get_regs_len, .get_regs = qlcnic_get_regs, .get_link = ethtool_op_get_link, 
	.get_eeprom_len = qlcnic_get_eeprom_len,
	.get_eeprom = qlcnic_get_eeprom,
	.get_ringparam = qlcnic_get_ringparam,
	.set_ringparam = qlcnic_set_ringparam,
	.get_channels = qlcnic_get_channels,
	.get_pauseparam = qlcnic_get_pauseparam,
	.get_wol = qlcnic_get_wol,
	.get_strings = qlcnic_get_strings,
	.get_ethtool_stats = qlcnic_get_ethtool_stats,
	.get_sset_count = qlcnic_get_sset_count,
	.get_coalesce = qlcnic_get_intr_coalesce,
	.set_coalesce = qlcnic_set_intr_coalesce,
	.set_msglevel = qlcnic_set_msglevel,
	.get_msglevel = qlcnic_get_msglevel,
	.get_link_ksettings = qlcnic_get_link_ksettings,
};

const struct ethtool_ops qlcnic_ethtool_failed_ops = {
	.get_drvinfo = qlcnic_get_drvinfo,
	.set_msglevel = qlcnic_set_msglevel,
	.get_msglevel = qlcnic_get_msglevel,
	.set_dump = qlcnic_set_dump,
	.get_link_ksettings = qlcnic_get_link_ksettings,
};
linux-master
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
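/* Editor's note: the three ethtool_ops tables at the end of qlcnic_ethtool.c
 * above are selected when the driver sets up its net_device. A minimal sketch
 * of that wiring follows; the helper name and its boolean parameters are
 * illustrative assumptions, only the ops tables and the standard
 * netdev->ethtool_ops field come from the sources above.
 */
static void example_qlcnic_attach_ethtool_ops(struct net_device *netdev,
					      bool sriov_vf, bool fw_failed)
{
	if (fw_failed)
		/* limited ops set when firmware initialization failed */
		netdev->ethtool_ops = &qlcnic_ethtool_failed_ops;
	else if (sriov_vf)
		/* VF ops table: no pause/WoL setters, no self-test or LED */
		netdev->ethtool_ops = &qlcnic_sriov_vf_ethtool_ops;
	else
		/* full PF ops table */
		netdev->ethtool_ops = &qlcnic_ethtool_ops;
}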
// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 */

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <linux/printk.h>
#include <linux/jiffies.h>

#include "qlcnic.h"

#define QLCNIC_TX_ETHER_PKT	0x01
#define QLCNIC_TX_TCP_PKT	0x02
#define QLCNIC_TX_UDP_PKT	0x03
#define QLCNIC_TX_IP_PKT	0x04
#define QLCNIC_TX_TCP_LSO	0x05
#define QLCNIC_TX_TCP_LSO6	0x06
#define QLCNIC_TX_ENCAP_PKT	0x07
#define QLCNIC_TX_ENCAP_LSO	0x08
#define QLCNIC_TX_TCPV6_PKT	0x0b
#define QLCNIC_TX_UDPV6_PKT	0x0c

#define QLCNIC_FLAGS_VLAN_TAGGED	0x10
#define QLCNIC_FLAGS_VLAN_OOB		0x40

#define qlcnic_set_tx_vlan_tci(cmd_desc, v)	\
	(cmd_desc)->vlan_TCI = cpu_to_le16(v);

#define qlcnic_set_cmd_desc_port(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) & 0x0F))

#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))

#define qlcnic_set_tx_port(_desc, _port) \
	((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))

#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
	((_desc)->flags_opcode |= \
	cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))

#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
	((_desc)->nfrags__length = \
	cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))

/* owner bits of status_desc */
#define STATUS_OWNER_HOST	(0x1ULL << 56)
#define STATUS_OWNER_PHANTOM	(0x2ULL << 56)

/* Status descriptor:
   0-3 port, 4-7 status, 8-11 type, 12-27 total_length
   28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
   53-55 desc_cnt, 56-57 owner, 58-63 opcode
 */
#define qlcnic_get_sts_port(sts_data)	\
	((sts_data) & 0x0F)
#define qlcnic_get_sts_status(sts_data)	\
	(((sts_data) >> 4) & 0x0F)
#define qlcnic_get_sts_type(sts_data)	\
	(((sts_data) >> 8) & 0x0F)
#define qlcnic_get_sts_totallength(sts_data)	\
	(((sts_data) >> 12) & 0xFFFF)
#define qlcnic_get_sts_refhandle(sts_data)	\
	(((sts_data) >> 28) & 0xFFFF)
#define qlcnic_get_sts_prot(sts_data)	\
	(((sts_data) >> 44) & 0x0F)
#define qlcnic_get_sts_pkt_offset(sts_data)	\
	(((sts_data) >> 48) & 0x1F)
#define qlcnic_get_sts_desc_cnt(sts_data)	\
	(((sts_data) >> 53) & 0x7)
#define qlcnic_get_sts_opcode(sts_data)	\
	(((sts_data) >> 58) & 0x03F)

#define qlcnic_get_lro_sts_refhandle(sts_data)	\
	((sts_data) & 0x07FFF)
#define qlcnic_get_lro_sts_length(sts_data)	\
	(((sts_data) >> 16) & 0x0FFFF)
#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)	\
	(((sts_data) >> 32) & 0x0FF)
#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)	\
	(((sts_data) >> 40) & 0x0FF)
#define qlcnic_get_lro_sts_timestamp(sts_data)	\
	(((sts_data) >> 48) & 0x1)
#define qlcnic_get_lro_sts_type(sts_data)	\
	(((sts_data) >> 49) & 0x7)
#define qlcnic_get_lro_sts_push_flag(sts_data)	\
	(((sts_data) >> 52) & 0x1)
#define qlcnic_get_lro_sts_seq_number(sts_data)	\
	((sts_data) & 0x0FFFFFFFF)
#define qlcnic_get_lro_sts_mss(sts_data1)	\
	((sts_data1 >> 32) & 0x0FFFF)

#define qlcnic_83xx_get_lro_sts_mss(sts)	((sts) & 0xffff)

/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD	0x03
#define QLCNIC_RXPKT_DESC	0x04
#define QLCNIC_OLD_RXPKT_DESC	0x3f
#define QLCNIC_RESPONSE_DESC	0x05
#define QLCNIC_LRO_DESC		0x12

#define QLCNIC_TCP_HDR_SIZE		20
#define QLCNIC_TCP_TS_OPTION_SIZE	12
#define QLCNIC_FETCH_RING_ID(handle)	((handle) >> 63)
#define QLCNIC_DESC_OWNER_FW		cpu_to_le64(STATUS_OWNER_PHANTOM)

#define QLCNIC_TCP_TS_HDR_SIZE	(QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)

/* for status field in
status_desc */ #define STATUS_CKSUM_LOOP 0 #define STATUS_CKSUM_OK 2 #define qlcnic_83xx_pktln(sts) ((sts >> 32) & 0x3FFF) #define qlcnic_83xx_hndl(sts) ((sts >> 48) & 0x7FFF) #define qlcnic_83xx_csum_status(sts) ((sts >> 39) & 7) #define qlcnic_83xx_opcode(sts) ((sts >> 42) & 0xF) #define qlcnic_83xx_vlan_tag(sts) (((sts) >> 48) & 0xFFFF) #define qlcnic_83xx_lro_pktln(sts) (((sts) >> 32) & 0x3FFF) #define qlcnic_83xx_l2_hdr_off(sts) (((sts) >> 16) & 0xFF) #define qlcnic_83xx_l4_hdr_off(sts) (((sts) >> 24) & 0xFF) #define qlcnic_83xx_pkt_cnt(sts) (((sts) >> 16) & 0x7) #define qlcnic_83xx_is_tstamp(sts) (((sts) >> 40) & 1) #define qlcnic_83xx_is_psh_bit(sts) (((sts) >> 41) & 1) #define qlcnic_83xx_is_ip_align(sts) (((sts) >> 46) & 1) #define qlcnic_83xx_has_vlan_tag(sts) (((sts) >> 47) & 1) static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max); static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *, struct qlcnic_host_rds_ring *, u16, u16); static inline u8 qlcnic_mac_hash(u64 mac, u16 vlan) { return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff) ^ (vlan & 0xff)); } static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter, u16 handle, u8 ring_id) { if (qlcnic_83xx_check(adapter)) return handle | (ring_id << 15); else return handle; } static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data) { return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0; } static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter, struct qlcnic_filter *fil, void *addr, u16 vlan_id) { int ret; u8 op; op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD; ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op); if (ret) return; op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL; ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op); if (!ret) { hlist_del(&fil->fnode); adapter->rx_fhash.fnum--; } } static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head, void *addr, u16 vlan_id) { struct qlcnic_filter *tmp_fil = NULL; struct hlist_node *n; hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { if (ether_addr_equal(tmp_fil->faddr, addr) && tmp_fil->vlan_id == vlan_id) return tmp_fil; } return NULL; } static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb, int loopback_pkt, u16 vlan_id) { struct ethhdr *phdr = (struct ethhdr *)(skb->data); struct qlcnic_filter *fil, *tmp_fil; struct hlist_head *head; unsigned long time; u64 src_addr = 0; u8 hindex, op; int ret; if (!qlcnic_sriov_pf_check(adapter) || (vlan_id == 0xffff)) vlan_id = 0; memcpy(&src_addr, phdr->h_source, ETH_ALEN); hindex = qlcnic_mac_hash(src_addr, vlan_id) & (adapter->fhash.fbucket_size - 1); if (loopback_pkt) { if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax) return; head = &(adapter->rx_fhash.fhead[hindex]); tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id); if (tmp_fil) { time = tmp_fil->ftime; if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time)) tmp_fil->ftime = jiffies; return; } fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC); if (!fil) return; fil->ftime = jiffies; memcpy(fil->faddr, &src_addr, ETH_ALEN); fil->vlan_id = vlan_id; spin_lock(&adapter->rx_mac_learn_lock); hlist_add_head(&(fil->fnode), head); adapter->rx_fhash.fnum++; spin_unlock(&adapter->rx_mac_learn_lock); } else { head = &adapter->fhash.fhead[hindex]; spin_lock(&adapter->mac_learn_lock); tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id); if (tmp_fil) { op = vlan_id ? 
QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL; ret = qlcnic_sre_macaddr_change(adapter, (u8 *)&src_addr, vlan_id, op); if (!ret) { hlist_del(&tmp_fil->fnode); adapter->fhash.fnum--; } spin_unlock(&adapter->mac_learn_lock); return; } spin_unlock(&adapter->mac_learn_lock); head = &adapter->rx_fhash.fhead[hindex]; spin_lock(&adapter->rx_mac_learn_lock); tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id); if (tmp_fil) qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr, vlan_id); spin_unlock(&adapter->rx_mac_learn_lock); } } void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr, u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring) { struct cmd_desc_type0 *hwdesc; struct qlcnic_nic_req *req; struct qlcnic_mac_req *mac_req; struct qlcnic_vlan_req *vlan_req; u32 producer; u64 word; producer = tx_ring->producer; hwdesc = &tx_ring->desc_head[tx_ring->producer]; req = (struct qlcnic_nic_req *)hwdesc; memset(req, 0, sizeof(struct qlcnic_nic_req)); req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23); word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16); req->req_hdr = cpu_to_le64(word); mac_req = (struct qlcnic_mac_req *)&(req->words[0]); mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD; memcpy(mac_req->mac_addr, uaddr, ETH_ALEN); vlan_req = (struct qlcnic_vlan_req *)&req->words[1]; vlan_req->vlan_id = cpu_to_le16(vlan_id); tx_ring->producer = get_next_index(producer, tx_ring->num_desc); smp_mb(); } static void qlcnic_send_filter(struct qlcnic_adapter *adapter, struct cmd_desc_type0 *first_desc, struct sk_buff *skb, struct qlcnic_host_tx_ring *tx_ring) { struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data); struct ethhdr *phdr = (struct ethhdr *)(skb->data); u16 protocol = ntohs(skb->protocol); struct qlcnic_filter *fil, *tmp_fil; struct hlist_head *head; struct hlist_node *n; u64 src_addr = 0; u16 vlan_id = 0; u8 hindex, hval; if (ether_addr_equal(phdr->h_source, adapter->mac_addr)) return; if (adapter->flags & QLCNIC_VLAN_FILTERING) { if (protocol == ETH_P_8021Q) { vh = skb_vlan_eth_hdr(skb); vlan_id = ntohs(vh->h_vlan_TCI); } else if (skb_vlan_tag_present(skb)) { vlan_id = skb_vlan_tag_get(skb); } } memcpy(&src_addr, phdr->h_source, ETH_ALEN); hval = qlcnic_mac_hash(src_addr, vlan_id); hindex = hval & (adapter->fhash.fbucket_size - 1); head = &(adapter->fhash.fhead[hindex]); hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { if (ether_addr_equal(tmp_fil->faddr, (u8 *)&src_addr) && tmp_fil->vlan_id == vlan_id) { if (time_is_before_jiffies(QLCNIC_READD_AGE * HZ + tmp_fil->ftime)) qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring); tmp_fil->ftime = jiffies; return; } } if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) { adapter->stats.mac_filter_limit_overrun++; return; } fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC); if (!fil) return; qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring); fil->ftime = jiffies; fil->vlan_id = vlan_id; memcpy(fil->faddr, &src_addr, ETH_ALEN); spin_lock(&adapter->mac_learn_lock); hlist_add_head(&(fil->fnode), head); adapter->fhash.fnum++; spin_unlock(&adapter->mac_learn_lock); } #define QLCNIC_ENCAP_VXLAN_PKT BIT_0 #define QLCNIC_ENCAP_OUTER_L3_IP6 BIT_1 #define QLCNIC_ENCAP_INNER_L3_IP6 BIT_2 #define QLCNIC_ENCAP_INNER_L4_UDP BIT_3 #define QLCNIC_ENCAP_DO_L3_CSUM BIT_4 #define QLCNIC_ENCAP_DO_L4_CSUM BIT_5 static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter, struct cmd_desc_type0 *first_desc, struct sk_buff *skb, struct qlcnic_host_tx_ring *tx_ring) { u8 opcode = 0, inner_hdr_len = 0, 
outer_hdr_len = 0, total_hdr_len = 0; int copied, copy_len, descr_size; u32 producer = tx_ring->producer; struct cmd_desc_type0 *hwdesc; u16 flags = 0, encap_descr = 0; opcode = QLCNIC_TX_ETHER_PKT; encap_descr = QLCNIC_ENCAP_VXLAN_PKT; if (skb_is_gso(skb)) { inner_hdr_len = skb_inner_transport_header(skb) + inner_tcp_hdrlen(skb) - skb_inner_mac_header(skb); /* VXLAN header size = 8 */ outer_hdr_len = skb_transport_offset(skb) + 8 + sizeof(struct udphdr); first_desc->outer_hdr_length = outer_hdr_len; total_hdr_len = inner_hdr_len + outer_hdr_len; encap_descr |= QLCNIC_ENCAP_DO_L3_CSUM | QLCNIC_ENCAP_DO_L4_CSUM; first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); first_desc->hdr_length = inner_hdr_len; /* Copy inner and outer headers in Tx descriptor(s) * If total_hdr_len > cmd_desc_type0, use multiple * descriptors */ copied = 0; descr_size = (int)sizeof(struct cmd_desc_type0); while (copied < total_hdr_len) { copy_len = min(descr_size, (total_hdr_len - copied)); hwdesc = &tx_ring->desc_head[producer]; tx_ring->cmd_buf_arr[producer].skb = NULL; skb_copy_from_linear_data_offset(skb, copied, (char *)hwdesc, copy_len); copied += copy_len; producer = get_next_index(producer, tx_ring->num_desc); } tx_ring->producer = producer; /* Make sure updated tx_ring->producer is visible * for qlcnic_tx_avail() */ smp_mb(); adapter->stats.encap_lso_frames++; opcode = QLCNIC_TX_ENCAP_LSO; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { if (inner_ip_hdr(skb)->version == 6) { if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP; } else { if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP) encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP; } adapter->stats.encap_tx_csummed++; opcode = QLCNIC_TX_ENCAP_PKT; } /* Prepare first 16 bits of byte offset 16 of Tx descriptor */ if (ip_hdr(skb)->version == 6) encap_descr |= QLCNIC_ENCAP_OUTER_L3_IP6; /* outer IP header's size in 32bit words size*/ encap_descr |= (skb_network_header_len(skb) >> 2) << 6; /* outer IP header offset */ encap_descr |= skb_network_offset(skb) << 10; first_desc->encap_descr = cpu_to_le16(encap_descr); first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) - skb->data; first_desc->ip_hdr_offset = skb_inner_network_offset(skb); qlcnic_set_tx_flags_opcode(first_desc, flags, opcode); return 0; } static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter, struct cmd_desc_type0 *first_desc, struct sk_buff *skb, struct qlcnic_host_tx_ring *tx_ring) { u8 l4proto, opcode = 0, hdr_len = 0, tag_vlan = 0; u16 flags = 0, vlan_tci = 0; int copied, offset, copy_len, size; struct cmd_desc_type0 *hwdesc; struct vlan_ethhdr *vh; u16 protocol = ntohs(skb->protocol); u32 producer = tx_ring->producer; if (protocol == ETH_P_8021Q) { vh = skb_vlan_eth_hdr(skb); flags = QLCNIC_FLAGS_VLAN_TAGGED; vlan_tci = ntohs(vh->h_vlan_TCI); protocol = ntohs(vh->h_vlan_encapsulated_proto); tag_vlan = 1; } else if (skb_vlan_tag_present(skb)) { flags = QLCNIC_FLAGS_VLAN_OOB; vlan_tci = skb_vlan_tag_get(skb); tag_vlan = 1; } if (unlikely(adapter->tx_pvid)) { if (tag_vlan && !(adapter->flags & QLCNIC_TAGGING_ENABLED)) return -EIO; if (tag_vlan && (adapter->flags & QLCNIC_TAGGING_ENABLED)) goto set_flags; flags = QLCNIC_FLAGS_VLAN_OOB; vlan_tci = adapter->tx_pvid; } set_flags: qlcnic_set_tx_vlan_tci(first_desc, vlan_tci); qlcnic_set_tx_flags_opcode(first_desc, flags, opcode); if (*(skb->data) & BIT_0) { flags |= BIT_0; memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN); } opcode = QLCNIC_TX_ETHER_PKT; if (skb_is_gso(skb)) { hdr_len = 
skb_tcp_all_headers(skb); first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); first_desc->hdr_length = hdr_len; opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 : QLCNIC_TX_TCP_LSO; /* For LSO, we need to copy the MAC/IP/TCP headers into * the descriptor ring */ copied = 0; offset = 2; if (flags & QLCNIC_FLAGS_VLAN_OOB) { first_desc->hdr_length += VLAN_HLEN; first_desc->tcp_hdr_offset = VLAN_HLEN; first_desc->ip_hdr_offset = VLAN_HLEN; /* Only in case of TSO on vlan device */ flags |= QLCNIC_FLAGS_VLAN_TAGGED; /* Create a TSO vlan header template for firmware */ hwdesc = &tx_ring->desc_head[producer]; tx_ring->cmd_buf_arr[producer].skb = NULL; copy_len = min((int)sizeof(struct cmd_desc_type0) - offset, hdr_len + VLAN_HLEN); vh = (struct vlan_ethhdr *)((char *) hwdesc + 2); skb_copy_from_linear_data(skb, vh, 12); vh->h_vlan_proto = htons(ETH_P_8021Q); vh->h_vlan_TCI = htons(vlan_tci); skb_copy_from_linear_data_offset(skb, 12, (char *)vh + 16, copy_len - 16); copied = copy_len - VLAN_HLEN; offset = 0; producer = get_next_index(producer, tx_ring->num_desc); } while (copied < hdr_len) { size = (int)sizeof(struct cmd_desc_type0) - offset; copy_len = min(size, (hdr_len - copied)); hwdesc = &tx_ring->desc_head[producer]; tx_ring->cmd_buf_arr[producer].skb = NULL; skb_copy_from_linear_data_offset(skb, copied, (char *)hwdesc + offset, copy_len); copied += copy_len; offset = 0; producer = get_next_index(producer, tx_ring->num_desc); } tx_ring->producer = producer; smp_mb(); adapter->stats.lso_frames++; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { if (protocol == ETH_P_IP) { l4proto = ip_hdr(skb)->protocol; if (l4proto == IPPROTO_TCP) opcode = QLCNIC_TX_TCP_PKT; else if (l4proto == IPPROTO_UDP) opcode = QLCNIC_TX_UDP_PKT; } else if (protocol == ETH_P_IPV6) { l4proto = ipv6_hdr(skb)->nexthdr; if (l4proto == IPPROTO_TCP) opcode = QLCNIC_TX_TCPV6_PKT; else if (l4proto == IPPROTO_UDP) opcode = QLCNIC_TX_UDPV6_PKT; } } first_desc->tcp_hdr_offset += skb_transport_offset(skb); first_desc->ip_hdr_offset += skb_network_offset(skb); qlcnic_set_tx_flags_opcode(first_desc, flags, opcode); return 0; } static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf) { struct qlcnic_skb_frag *nf; skb_frag_t *frag; int i, nr_frags; dma_addr_t map; nr_frags = skb_shinfo(skb)->nr_frags; nf = &pbuf->frag_array[0]; map = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, map)) goto out_err; nf->dma = map; nf->length = skb_headlen(skb); for (i = 0; i < nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; nf = &pbuf->frag_array[i+1]; map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, map)) goto unwind; nf->dma = map; nf->length = skb_frag_size(frag); } return 0; unwind: while (--i >= 0) { nf = &pbuf->frag_array[i+1]; dma_unmap_page(&pdev->dev, nf->dma, nf->length, DMA_TO_DEVICE); } nf = &pbuf->frag_array[0]; dma_unmap_single(&pdev->dev, nf->dma, skb_headlen(skb), DMA_TO_DEVICE); out_err: return -ENOMEM; } static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf) { struct qlcnic_skb_frag *nf = &pbuf->frag_array[0]; int i, nr_frags = skb_shinfo(skb)->nr_frags; for (i = 0; i < nr_frags; i++) { nf = &pbuf->frag_array[i+1]; dma_unmap_page(&pdev->dev, nf->dma, nf->length, DMA_TO_DEVICE); } nf = &pbuf->frag_array[0]; dma_unmap_single(&pdev->dev, nf->dma, skb_headlen(skb), DMA_TO_DEVICE); pbuf->skb = NULL; } 
static inline void qlcnic_clear_cmddesc(u64 *desc) { desc[0] = 0ULL; desc[2] = 0ULL; desc[7] = 0ULL; } netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_cmd_buffer *pbuf; struct qlcnic_skb_frag *buffrag; struct cmd_desc_type0 *hwdesc, *first_desc; struct pci_dev *pdev; struct ethhdr *phdr; int i, k, frag_count, delta = 0; u32 producer, num_txd; u16 protocol; bool l4_is_udp = false; if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { netif_tx_stop_all_queues(netdev); return NETDEV_TX_BUSY; } if (adapter->flags & QLCNIC_MACSPOOF) { phdr = (struct ethhdr *)skb->data; if (!ether_addr_equal(phdr->h_source, adapter->mac_addr)) goto drop_packet; } tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)]; num_txd = tx_ring->num_desc; frag_count = skb_shinfo(skb)->nr_frags + 1; /* 14 frags supported for normal packet and * 32 frags supported for TSO packet */ if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) { for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++) delta += skb_frag_size(&skb_shinfo(skb)->frags[i]); if (!__pskb_pull_tail(skb, delta)) goto drop_packet; frag_count = 1 + skb_shinfo(skb)->nr_frags; } if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) { netif_tx_stop_queue(tx_ring->txq); if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) { netif_tx_start_queue(tx_ring->txq); } else { tx_ring->tx_stats.xmit_off++; return NETDEV_TX_BUSY; } } producer = tx_ring->producer; pbuf = &tx_ring->cmd_buf_arr[producer]; pdev = adapter->pdev; first_desc = &tx_ring->desc_head[producer]; hwdesc = &tx_ring->desc_head[producer]; qlcnic_clear_cmddesc((u64 *)hwdesc); if (qlcnic_map_tx_skb(pdev, skb, pbuf)) { adapter->stats.tx_dma_map_error++; goto drop_packet; } pbuf->skb = skb; pbuf->frag_count = frag_count; qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len); qlcnic_set_tx_port(first_desc, adapter->portnum); for (i = 0; i < frag_count; i++) { k = i % 4; if ((k == 0) && (i > 0)) { /* move to next desc.*/ producer = get_next_index(producer, num_txd); hwdesc = &tx_ring->desc_head[producer]; qlcnic_clear_cmddesc((u64 *)hwdesc); tx_ring->cmd_buf_arr[producer].skb = NULL; } buffrag = &pbuf->frag_array[i]; hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length); switch (k) { case 0: hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma); break; case 1: hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma); break; case 2: hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma); break; case 3: hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma); break; } } tx_ring->producer = get_next_index(producer, num_txd); smp_mb(); protocol = ntohs(skb->protocol); if (protocol == ETH_P_IP) l4_is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP; else if (protocol == ETH_P_IPV6) l4_is_udp = ipv6_hdr(skb)->nexthdr == IPPROTO_UDP; /* Check if it is a VXLAN packet */ if (!skb->encapsulation || !l4_is_udp || !qlcnic_encap_tx_offload(adapter)) { if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb, tx_ring))) goto unwind_buff; } else { if (unlikely(qlcnic_tx_encap_pkt(adapter, first_desc, skb, tx_ring))) goto unwind_buff; } if (adapter->drv_mac_learn) qlcnic_send_filter(adapter, first_desc, skb, tx_ring); tx_ring->tx_stats.tx_bytes += skb->len; tx_ring->tx_stats.xmit_called++; /* Ensure writes are complete before HW fetches Tx descriptors */ wmb(); qlcnic_update_cmd_producer(tx_ring); return NETDEV_TX_OK; unwind_buff: qlcnic_unmap_buffers(pdev, skb, pbuf); drop_packet: adapter->stats.txdropped++; 
dev_kfree_skb_any(skb); return NETDEV_TX_OK; } void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup) { struct net_device *netdev = adapter->netdev; if (adapter->ahw->linkup && !linkup) { netdev_info(netdev, "NIC Link is down\n"); adapter->ahw->linkup = 0; netif_carrier_off(netdev); } else if (!adapter->ahw->linkup && linkup) { adapter->ahw->linkup = 1; /* Do not advertise Link up to the stack if device * is in loopback mode */ if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) { netdev_info(netdev, "NIC Link is up for loopback test\n"); return; } netdev_info(netdev, "NIC Link is up\n"); netif_carrier_on(netdev); } } static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter, struct qlcnic_host_rds_ring *rds_ring, struct qlcnic_rx_buffer *buffer) { struct sk_buff *skb; dma_addr_t dma; struct pci_dev *pdev = adapter->pdev; skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size); if (!skb) { adapter->stats.skb_alloc_failure++; return -ENOMEM; } skb_reserve(skb, NET_IP_ALIGN); dma = dma_map_single(&pdev->dev, skb->data, rds_ring->dma_size, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, dma)) { adapter->stats.rx_dma_map_error++; dev_kfree_skb_any(skb); return -ENOMEM; } buffer->skb = skb; buffer->dma = dma; return 0; } static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter, struct qlcnic_host_rds_ring *rds_ring, u8 ring_id) { struct rcv_desc *pdesc; struct qlcnic_rx_buffer *buffer; int count = 0; uint32_t producer, handle; struct list_head *head; if (!spin_trylock(&rds_ring->lock)) return; producer = rds_ring->producer; head = &rds_ring->free_list; while (!list_empty(head)) { buffer = list_entry(head->next, struct qlcnic_rx_buffer, list); if (!buffer->skb) { if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer)) break; } count++; list_del(&buffer->list); /* make a rcv descriptor */ pdesc = &rds_ring->desc_head[producer]; handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle, ring_id); pdesc->reference_handle = cpu_to_le16(handle); pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); pdesc->addr_buffer = cpu_to_le64(buffer->dma); producer = get_next_index(producer, rds_ring->num_desc); } if (count) { rds_ring->producer = producer; writel((producer - 1) & (rds_ring->num_desc - 1), rds_ring->crb_rcv_producer); } spin_unlock(&rds_ring->lock); } static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, struct qlcnic_host_tx_ring *tx_ring, int budget) { u32 sw_consumer, hw_consumer; int i, done, count = 0; struct qlcnic_cmd_buffer *buffer; struct pci_dev *pdev = adapter->pdev; struct net_device *netdev = adapter->netdev; struct qlcnic_skb_frag *frag; if (!spin_trylock(&tx_ring->tx_clean_lock)) return 1; sw_consumer = tx_ring->sw_consumer; hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); while (sw_consumer != hw_consumer) { buffer = &tx_ring->cmd_buf_arr[sw_consumer]; if (buffer->skb) { frag = &buffer->frag_array[0]; dma_unmap_single(&pdev->dev, frag->dma, frag->length, DMA_TO_DEVICE); frag->dma = 0ULL; for (i = 1; i < buffer->frag_count; i++) { frag++; dma_unmap_page(&pdev->dev, frag->dma, frag->length, DMA_TO_DEVICE); frag->dma = 0ULL; } tx_ring->tx_stats.xmit_finished++; dev_kfree_skb_any(buffer->skb); buffer->skb = NULL; } sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc); if (++count >= budget) break; } tx_ring->sw_consumer = sw_consumer; if (count && netif_running(netdev)) { smp_mb(); if (netif_tx_queue_stopped(tx_ring->txq) && netif_carrier_ok(netdev)) { if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) { 
netif_tx_wake_queue(tx_ring->txq); tx_ring->tx_stats.xmit_on++; } } adapter->tx_timeo_cnt = 0; } /* * If everything is freed up to consumer then check if the ring is full * If the ring is full then check if more needs to be freed and * schedule the call back again. * * This happens when there are 2 CPUs. One could be freeing and the * other filling it. If the ring is full when we get out of here and * the card has already interrupted the host then the host can miss the * interrupt. * * There is still a possible race condition and the host could miss an * interrupt. The card has to take care of this. */ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); done = (sw_consumer == hw_consumer); spin_unlock(&tx_ring->tx_clean_lock); return done; } static int qlcnic_poll(struct napi_struct *napi, int budget) { int tx_complete, work_done; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_adapter *adapter; struct qlcnic_host_tx_ring *tx_ring; sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); adapter = sds_ring->adapter; tx_ring = sds_ring->tx_ring; tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); work_done = qlcnic_process_rcv_ring(sds_ring, budget); /* Check if we need a repoll */ if (!tx_complete) work_done = budget; if (work_done < budget) { napi_complete_done(&sds_ring->napi, work_done); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { qlcnic_enable_sds_intr(adapter, sds_ring); qlcnic_enable_tx_intr(adapter, tx_ring); } } return work_done; } static int qlcnic_tx_poll(struct napi_struct *napi, int budget) { struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_adapter *adapter; int work_done; tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi); adapter = tx_ring->adapter; work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget); if (work_done) { napi_complete(&tx_ring->napi); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) qlcnic_enable_tx_intr(adapter, tx_ring); } else { /* As qlcnic_process_cmd_ring() returned 0, we need a repoll*/ work_done = budget; } return work_done; } static int qlcnic_rx_poll(struct napi_struct *napi, int budget) { struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_adapter *adapter; int work_done; sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); adapter = sds_ring->adapter; work_done = qlcnic_process_rcv_ring(sds_ring, budget); if (work_done < budget) { napi_complete_done(&sds_ring->napi, work_done); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) qlcnic_enable_sds_intr(adapter, sds_ring); } return work_done; } static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter, struct qlcnic_fw_msg *msg) { u32 cable_OUI; u16 cable_len, link_speed; u8 link_status, module, duplex, autoneg, lb_status = 0; struct net_device *netdev = adapter->netdev; adapter->ahw->has_link_events = 1; cable_OUI = msg->body[1] & 0xffffffff; cable_len = (msg->body[1] >> 32) & 0xffff; link_speed = (msg->body[1] >> 48) & 0xffff; link_status = msg->body[2] & 0xff; duplex = (msg->body[2] >> 16) & 0xff; autoneg = (msg->body[2] >> 24) & 0xff; lb_status = (msg->body[2] >> 32) & 0x3; module = (msg->body[2] >> 8) & 0xff; if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, length %d\n", cable_OUI, cable_len); else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) dev_info(&netdev->dev, "unsupported cable length %d\n", cable_len); if (!link_status && (lb_status == QLCNIC_ILB_MODE || lb_status == QLCNIC_ELB_MODE)) adapter->ahw->loopback_state |= QLCNIC_LINKEVENT; 
qlcnic_advert_link_change(adapter, link_status); if (duplex == LINKEVENT_FULL_DUPLEX) adapter->ahw->link_duplex = DUPLEX_FULL; else adapter->ahw->link_duplex = DUPLEX_HALF; adapter->ahw->module_type = module; adapter->ahw->link_autoneg = autoneg; if (link_status) { adapter->ahw->link_speed = link_speed; } else { adapter->ahw->link_speed = SPEED_UNKNOWN; adapter->ahw->link_duplex = DUPLEX_UNKNOWN; } } static void qlcnic_handle_fw_message(int desc_cnt, int index, struct qlcnic_host_sds_ring *sds_ring) { struct qlcnic_fw_msg msg; struct status_desc *desc; struct qlcnic_adapter *adapter; struct device *dev; int i = 0, opcode, ret; while (desc_cnt > 0 && i < 8) { desc = &sds_ring->desc_head[index]; msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]); msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]); index = get_next_index(index, sds_ring->num_desc); desc_cnt--; } adapter = sds_ring->adapter; dev = &adapter->pdev->dev; opcode = qlcnic_get_nic_msg_opcode(msg.body[0]); switch (opcode) { case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE: qlcnic_handle_linkevent(adapter, &msg); break; case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK: ret = (u32)(msg.body[1]); switch (ret) { case 0: adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE; break; case 1: dev_info(dev, "loopback already in progress\n"); adapter->ahw->diag_cnt = -EINPROGRESS; break; case 2: dev_info(dev, "loopback cable is not connected\n"); adapter->ahw->diag_cnt = -ENODEV; break; default: dev_info(dev, "loopback configure request failed, err %x\n", ret); adapter->ahw->diag_cnt = -EIO; break; } break; case QLCNIC_C2H_OPCODE_GET_DCB_AEN: qlcnic_dcb_aen_handler(adapter->dcb, (void *)&msg); break; default: break; } } static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter, struct qlcnic_host_rds_ring *ring, u16 index, u16 cksum) { struct qlcnic_rx_buffer *buffer; struct sk_buff *skb; buffer = &ring->rx_buf_arr[index]; if (unlikely(buffer->skb == NULL)) { WARN_ON(1); return NULL; } dma_unmap_single(&adapter->pdev->dev, buffer->dma, ring->dma_size, DMA_FROM_DEVICE); skb = buffer->skb; if (likely((adapter->netdev->features & NETIF_F_RXCSUM) && (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) { adapter->stats.csummed++; skb->ip_summed = CHECKSUM_UNNECESSARY; } else { skb_checksum_none_assert(skb); } buffer->skb = NULL; return skb; } static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb, u16 *vlan_tag) { struct ethhdr *eth_hdr; if (!__vlan_get_tag(skb, vlan_tag)) { eth_hdr = (struct ethhdr *)skb->data; memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2); skb_pull(skb, VLAN_HLEN); } if (!adapter->rx_pvid) return 0; if (*vlan_tag == adapter->rx_pvid) { /* Outer vlan tag. 
Packet should follow non-vlan path */ *vlan_tag = 0xffff; return 0; } if (adapter->flags & QLCNIC_TAGGING_ENABLED) return 0; return -EINVAL; } static struct qlcnic_rx_buffer * qlcnic_process_rcv(struct qlcnic_adapter *adapter, struct qlcnic_host_sds_ring *sds_ring, int ring, u64 sts_data0) { struct net_device *netdev = adapter->netdev; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct qlcnic_rx_buffer *buffer; struct sk_buff *skb; struct qlcnic_host_rds_ring *rds_ring; int index, length, cksum, pkt_offset, is_lb_pkt; u16 vid = 0xffff, t_vid; if (unlikely(ring >= adapter->max_rds_rings)) return NULL; rds_ring = &recv_ctx->rds_rings[ring]; index = qlcnic_get_sts_refhandle(sts_data0); if (unlikely(index >= rds_ring->num_desc)) return NULL; buffer = &rds_ring->rx_buf_arr[index]; length = qlcnic_get_sts_totallength(sts_data0); cksum = qlcnic_get_sts_status(sts_data0); pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0); skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum); if (!skb) return buffer; if (adapter->rx_mac_learn) { t_vid = 0; is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0); qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid); } if (length > rds_ring->skb_size) skb_put(skb, rds_ring->skb_size); else skb_put(skb, length); if (pkt_offset) skb_pull(skb, pkt_offset); if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) { adapter->stats.rxdropped++; dev_kfree_skb(skb); return buffer; } skb->protocol = eth_type_trans(skb, netdev); if (vid != 0xffff) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); napi_gro_receive(&sds_ring->napi, skb); adapter->stats.rx_pkts++; adapter->stats.rxbytes += length; return buffer; } #define QLC_TCP_HDR_SIZE 20 #define QLC_TCP_TS_OPTION_SIZE 12 #define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE) static struct qlcnic_rx_buffer * qlcnic_process_lro(struct qlcnic_adapter *adapter, int ring, u64 sts_data0, u64 sts_data1) { struct net_device *netdev = adapter->netdev; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct qlcnic_rx_buffer *buffer; struct sk_buff *skb; struct qlcnic_host_rds_ring *rds_ring; struct iphdr *iph; struct ipv6hdr *ipv6h; struct tcphdr *th; bool push, timestamp; int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt; u16 lro_length, length, data_offset, t_vid, vid = 0xffff; u32 seq_number; if (unlikely(ring >= adapter->max_rds_rings)) return NULL; rds_ring = &recv_ctx->rds_rings[ring]; index = qlcnic_get_lro_sts_refhandle(sts_data0); if (unlikely(index >= rds_ring->num_desc)) return NULL; buffer = &rds_ring->rx_buf_arr[index]; timestamp = qlcnic_get_lro_sts_timestamp(sts_data0); lro_length = qlcnic_get_lro_sts_length(sts_data0); l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0); l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0); push = qlcnic_get_lro_sts_push_flag(sts_data0); seq_number = qlcnic_get_lro_sts_seq_number(sts_data1); skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK); if (!skb) return buffer; if (adapter->rx_mac_learn) { t_vid = 0; is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0); qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid); } if (timestamp) data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE; else data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE; skb_put(skb, lro_length + data_offset); skb_pull(skb, l2_hdr_offset); if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) { adapter->stats.rxdropped++; dev_kfree_skb(skb); return buffer; } skb->protocol = eth_type_trans(skb, netdev); if (ntohs(skb->protocol) == ETH_P_IPV6) { ipv6h = 
(struct ipv6hdr *)skb->data; th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr)); length = (th->doff << 2) + lro_length; ipv6h->payload_len = htons(length); } else { iph = (struct iphdr *)skb->data; th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); length = (iph->ihl << 2) + (th->doff << 2) + lro_length; csum_replace2(&iph->check, iph->tot_len, htons(length)); iph->tot_len = htons(length); } th->psh = push; th->seq = htonl(seq_number); length = skb->len; if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) { skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1); if (skb->protocol == htons(ETH_P_IPV6)) skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; else skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; } if (vid != 0xffff) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); netif_receive_skb(skb); adapter->stats.lro_pkts++; adapter->stats.lrobytes += length; return buffer; } static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max) { struct qlcnic_host_rds_ring *rds_ring; struct qlcnic_adapter *adapter = sds_ring->adapter; struct list_head *cur; struct status_desc *desc; struct qlcnic_rx_buffer *rxbuf; int opcode, desc_cnt, count = 0; u64 sts_data0, sts_data1; u8 ring; u32 consumer = sds_ring->consumer; while (count < max) { desc = &sds_ring->desc_head[consumer]; sts_data0 = le64_to_cpu(desc->status_desc_data[0]); if (!(sts_data0 & STATUS_OWNER_HOST)) break; desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0); opcode = qlcnic_get_sts_opcode(sts_data0); switch (opcode) { case QLCNIC_RXPKT_DESC: case QLCNIC_OLD_RXPKT_DESC: case QLCNIC_SYN_OFFLOAD: ring = qlcnic_get_sts_type(sts_data0); rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring, sts_data0); break; case QLCNIC_LRO_DESC: ring = qlcnic_get_lro_sts_type(sts_data0); sts_data1 = le64_to_cpu(desc->status_desc_data[1]); rxbuf = qlcnic_process_lro(adapter, ring, sts_data0, sts_data1); break; case QLCNIC_RESPONSE_DESC: qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring); goto skip; default: goto skip; } WARN_ON(desc_cnt > 1); if (likely(rxbuf)) list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); else adapter->stats.null_rxbuf++; skip: for (; desc_cnt > 0; desc_cnt--) { desc = &sds_ring->desc_head[consumer]; desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW; consumer = get_next_index(consumer, sds_ring->num_desc); } count++; } for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &adapter->recv_ctx->rds_rings[ring]; if (!list_empty(&sds_ring->free_list[ring])) { list_for_each(cur, &sds_ring->free_list[ring]) { rxbuf = list_entry(cur, struct qlcnic_rx_buffer, list); qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf); } spin_lock(&rds_ring->lock); list_splice_tail_init(&sds_ring->free_list[ring], &rds_ring->free_list); spin_unlock(&rds_ring->lock); } qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring); } if (count) { sds_ring->consumer = consumer; writel(consumer, sds_ring->crb_sts_consumer); } return count; } void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, struct qlcnic_host_rds_ring *rds_ring, u8 ring_id) { struct rcv_desc *pdesc; struct qlcnic_rx_buffer *buffer; int count = 0; u32 producer, handle; struct list_head *head; producer = rds_ring->producer; head = &rds_ring->free_list; while (!list_empty(head)) { buffer = list_entry(head->next, struct qlcnic_rx_buffer, list); if (!buffer->skb) { if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer)) break; } count++; list_del(&buffer->list); /* make a rcv descriptor */ pdesc = &rds_ring->desc_head[producer]; pdesc->addr_buffer = cpu_to_le64(buffer->dma); 
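/* Added note (not in the original source): besides the DMA address written
 * above, the receive descriptor is completed below with a reference handle
 * (the ring id is folded in by qlcnic_get_ref_handle()) and the buffer
 * length, before the producer index is advanced.
 */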
handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle, ring_id); pdesc->reference_handle = cpu_to_le16(handle); pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); producer = get_next_index(producer, rds_ring->num_desc); } if (count) { rds_ring->producer = producer; writel((producer-1) & (rds_ring->num_desc-1), rds_ring->crb_rcv_producer); } } static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter) { if (adapter->ahw->msg_enable & NETIF_MSG_DRV) { char prefix[30]; scnprintf(prefix, sizeof(prefix), "%s: %s: ", dev_name(&adapter->pdev->dev), __func__); print_hex_dump_debug(prefix, DUMP_PREFIX_NONE, 16, 1, skb->data, skb->len, true); } } static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring, u64 sts_data0) { struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct sk_buff *skb; struct qlcnic_host_rds_ring *rds_ring; int index, length, cksum, pkt_offset; if (unlikely(ring >= adapter->max_rds_rings)) return; rds_ring = &recv_ctx->rds_rings[ring]; index = qlcnic_get_sts_refhandle(sts_data0); length = qlcnic_get_sts_totallength(sts_data0); if (unlikely(index >= rds_ring->num_desc)) return; cksum = qlcnic_get_sts_status(sts_data0); pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0); skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum); if (!skb) return; if (length > rds_ring->skb_size) skb_put(skb, rds_ring->skb_size); else skb_put(skb, length); if (pkt_offset) skb_pull(skb, pkt_offset); if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr)) adapter->ahw->diag_cnt++; else dump_skb(skb, adapter); dev_kfree_skb_any(skb); adapter->stats.rx_pkts++; adapter->stats.rxbytes += length; return; } void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring) { struct qlcnic_adapter *adapter = sds_ring->adapter; struct status_desc *desc; u64 sts_data0; int ring, opcode, desc_cnt; u32 consumer = sds_ring->consumer; desc = &sds_ring->desc_head[consumer]; sts_data0 = le64_to_cpu(desc->status_desc_data[0]); if (!(sts_data0 & STATUS_OWNER_HOST)) return; desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0); opcode = qlcnic_get_sts_opcode(sts_data0); switch (opcode) { case QLCNIC_RESPONSE_DESC: qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring); break; default: ring = qlcnic_get_sts_type(sts_data0); qlcnic_process_rcv_diag(adapter, ring, sts_data0); break; } for (; desc_cnt > 0; desc_cnt--) { desc = &sds_ring->desc_head[consumer]; desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM); consumer = get_next_index(consumer, sds_ring->num_desc); } sds_ring->consumer = consumer; writel(consumer, sds_ring->crb_sts_consumer); } int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev) { int ring; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct qlcnic_host_tx_ring *tx_ring; if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings)) return -ENOMEM; for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) { netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll); } else { if (ring == (adapter->drv_sds_rings - 1)) netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll); else netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll); } } if (qlcnic_alloc_tx_rings(adapter, netdev)) { qlcnic_free_sds_rings(recv_ctx); return -ENOMEM; } if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) { for (ring = 0; ring < adapter->drv_tx_rings; ring++) { 
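/* Added note (not in the original source): in multi-Tx mode each Tx ring
 * gets its own NAPI context, registered below with netif_napi_add_tx() and
 * serviced by qlcnic_tx_poll().
 */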
tx_ring = &adapter->tx_ring[ring]; netif_napi_add_tx(netdev, &tx_ring->napi, qlcnic_tx_poll); } } return 0; } void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter) { int ring; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct qlcnic_host_tx_ring *tx_ring; for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; netif_napi_del(&sds_ring->napi); } qlcnic_free_sds_rings(adapter->recv_ctx); if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) { for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; netif_napi_del(&tx_ring->napi); } } qlcnic_free_tx_rings(adapter); } void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter) { int ring; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) return; for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; napi_enable(&sds_ring->napi); qlcnic_enable_sds_intr(adapter, sds_ring); } if (qlcnic_check_multi_tx(adapter) && (adapter->flags & QLCNIC_MSIX_ENABLED) && !adapter->ahw->diag_test) { for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; napi_enable(&tx_ring->napi); qlcnic_enable_tx_intr(adapter, tx_ring); } } } void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter) { int ring; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) return; for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; qlcnic_disable_sds_intr(adapter, sds_ring); napi_synchronize(&sds_ring->napi); napi_disable(&sds_ring->napi); } if ((adapter->flags & QLCNIC_MSIX_ENABLED) && !adapter->ahw->diag_test && qlcnic_check_multi_tx(adapter)) { for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; qlcnic_disable_tx_intr(adapter, tx_ring); napi_synchronize(&tx_ring->napi); napi_disable(&tx_ring->napi); } } } #define QLC_83XX_NORMAL_LB_PKT (1ULL << 36) #define QLC_83XX_LRO_LB_PKT (1ULL << 46) static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt) { if (lro_pkt) return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0; else return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 
1 : 0; } #define QLCNIC_ENCAP_LENGTH_MASK 0x7f static inline u8 qlcnic_encap_length(u64 sts_data) { return sts_data & QLCNIC_ENCAP_LENGTH_MASK; } static struct qlcnic_rx_buffer * qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter, struct qlcnic_host_sds_ring *sds_ring, u8 ring, u64 sts_data[]) { struct net_device *netdev = adapter->netdev; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct qlcnic_rx_buffer *buffer; struct sk_buff *skb; struct qlcnic_host_rds_ring *rds_ring; int index, length, cksum, is_lb_pkt; u16 vid = 0xffff; int err; if (unlikely(ring >= adapter->max_rds_rings)) return NULL; rds_ring = &recv_ctx->rds_rings[ring]; index = qlcnic_83xx_hndl(sts_data[0]); if (unlikely(index >= rds_ring->num_desc)) return NULL; buffer = &rds_ring->rx_buf_arr[index]; length = qlcnic_83xx_pktln(sts_data[0]); cksum = qlcnic_83xx_csum_status(sts_data[1]); skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum); if (!skb) return buffer; if (length > rds_ring->skb_size) skb_put(skb, rds_ring->skb_size); else skb_put(skb, length); err = qlcnic_check_rx_tagging(adapter, skb, &vid); if (adapter->rx_mac_learn) { is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0); qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid); } if (unlikely(err)) { adapter->stats.rxdropped++; dev_kfree_skb(skb); return buffer; } skb->protocol = eth_type_trans(skb, netdev); if (qlcnic_encap_length(sts_data[1]) && skb->ip_summed == CHECKSUM_UNNECESSARY) { skb->csum_level = 1; adapter->stats.encap_rx_csummed++; } if (vid != 0xffff) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); napi_gro_receive(&sds_ring->napi, skb); adapter->stats.rx_pkts++; adapter->stats.rxbytes += length; return buffer; } static struct qlcnic_rx_buffer * qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter, u8 ring, u64 sts_data[]) { struct net_device *netdev = adapter->netdev; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct qlcnic_rx_buffer *buffer; struct sk_buff *skb; struct qlcnic_host_rds_ring *rds_ring; struct iphdr *iph; struct ipv6hdr *ipv6h; struct tcphdr *th; bool push; int l2_hdr_offset, l4_hdr_offset; int index, is_lb_pkt; u16 lro_length, length, data_offset, gso_size; u16 vid = 0xffff; int err; if (unlikely(ring >= adapter->max_rds_rings)) return NULL; rds_ring = &recv_ctx->rds_rings[ring]; index = qlcnic_83xx_hndl(sts_data[0]); if (unlikely(index >= rds_ring->num_desc)) return NULL; buffer = &rds_ring->rx_buf_arr[index]; lro_length = qlcnic_83xx_lro_pktln(sts_data[0]); l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]); l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]); push = qlcnic_83xx_is_psh_bit(sts_data[1]); skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK); if (!skb) return buffer; if (qlcnic_83xx_is_tstamp(sts_data[1])) data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE; else data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE; skb_put(skb, lro_length + data_offset); skb_pull(skb, l2_hdr_offset); err = qlcnic_check_rx_tagging(adapter, skb, &vid); if (adapter->rx_mac_learn) { is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1); qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid); } if (unlikely(err)) { adapter->stats.rxdropped++; dev_kfree_skb(skb); return buffer; } skb->protocol = eth_type_trans(skb, netdev); if (ntohs(skb->protocol) == ETH_P_IPV6) { ipv6h = (struct ipv6hdr *)skb->data; th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr)); length = (th->doff << 2) + lro_length; ipv6h->payload_len = htons(length); } else { iph = (struct iphdr *)skb->data; th = (struct 
tcphdr *)(skb->data + (iph->ihl << 2)); length = (iph->ihl << 2) + (th->doff << 2) + lro_length; csum_replace2(&iph->check, iph->tot_len, htons(length)); iph->tot_len = htons(length); } th->psh = push; length = skb->len; if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) { gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]); skb_shinfo(skb)->gso_size = gso_size; if (skb->protocol == htons(ETH_P_IPV6)) skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; else skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; } if (vid != 0xffff) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); netif_receive_skb(skb); adapter->stats.lro_pkts++; adapter->stats.lrobytes += length; return buffer; } static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max) { struct qlcnic_host_rds_ring *rds_ring; struct qlcnic_adapter *adapter = sds_ring->adapter; struct list_head *cur; struct status_desc *desc; struct qlcnic_rx_buffer *rxbuf = NULL; u8 ring; u64 sts_data[2]; int count = 0, opcode; u32 consumer = sds_ring->consumer; while (count < max) { desc = &sds_ring->desc_head[consumer]; sts_data[1] = le64_to_cpu(desc->status_desc_data[1]); opcode = qlcnic_83xx_opcode(sts_data[1]); if (!opcode) break; sts_data[0] = le64_to_cpu(desc->status_desc_data[0]); ring = QLCNIC_FETCH_RING_ID(sts_data[0]); switch (opcode) { case QLC_83XX_REG_DESC: rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring, ring, sts_data); break; case QLC_83XX_LRO_DESC: rxbuf = qlcnic_83xx_process_lro(adapter, ring, sts_data); break; default: dev_info(&adapter->pdev->dev, "Unknown opcode: 0x%x\n", opcode); goto skip; } if (likely(rxbuf)) list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); else adapter->stats.null_rxbuf++; skip: desc = &sds_ring->desc_head[consumer]; /* Reset the descriptor */ desc->status_desc_data[1] = 0; consumer = get_next_index(consumer, sds_ring->num_desc); count++; } for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &adapter->recv_ctx->rds_rings[ring]; if (!list_empty(&sds_ring->free_list[ring])) { list_for_each(cur, &sds_ring->free_list[ring]) { rxbuf = list_entry(cur, struct qlcnic_rx_buffer, list); qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf); } spin_lock(&rds_ring->lock); list_splice_tail_init(&sds_ring->free_list[ring], &rds_ring->free_list); spin_unlock(&rds_ring->lock); } qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring); } if (count) { sds_ring->consumer = consumer; writel(consumer, sds_ring->crb_sts_consumer); } return count; } static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget) { int tx_complete; int work_done; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_adapter *adapter; struct qlcnic_host_tx_ring *tx_ring; sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); adapter = sds_ring->adapter; /* tx ring count = 1 */ tx_ring = adapter->tx_ring; tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); /* Check if we need a repoll */ if (!tx_complete) work_done = budget; if (work_done < budget) { napi_complete_done(&sds_ring->napi, work_done); qlcnic_enable_sds_intr(adapter, sds_ring); } return work_done; } static int qlcnic_83xx_poll(struct napi_struct *napi, int budget) { int tx_complete; int work_done; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_adapter *adapter; struct qlcnic_host_tx_ring *tx_ring; sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); adapter = sds_ring->adapter; /* tx ring count = 1 */ tx_ring = adapter->tx_ring; tx_complete = 
qlcnic_process_cmd_ring(adapter, tx_ring, budget); work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); /* Check if we need a repoll */ if (!tx_complete) work_done = budget; if (work_done < budget) { napi_complete_done(&sds_ring->napi, work_done); qlcnic_enable_sds_intr(adapter, sds_ring); } return work_done; } static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget) { int work_done; struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_adapter *adapter; tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi); adapter = tx_ring->adapter; work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget); if (work_done) { napi_complete(&tx_ring->napi); if (test_bit(__QLCNIC_DEV_UP , &adapter->state)) qlcnic_enable_tx_intr(adapter, tx_ring); } else { /* need a repoll */ work_done = budget; } return work_done; } static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget) { int work_done; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_adapter *adapter; sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); adapter = sds_ring->adapter; work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); if (work_done < budget) { napi_complete_done(&sds_ring->napi, work_done); if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) qlcnic_enable_sds_intr(adapter, sds_ring); } return work_done; } void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter) { int ring; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) return; for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; napi_enable(&sds_ring->napi); if (adapter->flags & QLCNIC_MSIX_ENABLED) qlcnic_enable_sds_intr(adapter, sds_ring); } if ((adapter->flags & QLCNIC_MSIX_ENABLED) && !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; napi_enable(&tx_ring->napi); qlcnic_enable_tx_intr(adapter, tx_ring); } } } void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter) { int ring; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct qlcnic_host_tx_ring *tx_ring; if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) return; for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; if (adapter->flags & QLCNIC_MSIX_ENABLED) qlcnic_disable_sds_intr(adapter, sds_ring); napi_synchronize(&sds_ring->napi); napi_disable(&sds_ring->napi); } if ((adapter->flags & QLCNIC_MSIX_ENABLED) && !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; qlcnic_disable_tx_intr(adapter, tx_ring); napi_synchronize(&tx_ring->napi); napi_disable(&tx_ring->napi); } } } int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev) { int ring; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings)) return -ENOMEM; for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; if (adapter->flags & QLCNIC_MSIX_ENABLED) { if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) netif_napi_add(netdev, &sds_ring->napi, qlcnic_83xx_rx_poll); else netif_napi_add(netdev, &sds_ring->napi, qlcnic_83xx_msix_sriov_vf_poll); } else { netif_napi_add(netdev, 
&sds_ring->napi, qlcnic_83xx_poll); } } if (qlcnic_alloc_tx_rings(adapter, netdev)) { qlcnic_free_sds_rings(recv_ctx); return -ENOMEM; } if ((adapter->flags & QLCNIC_MSIX_ENABLED) && !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; netif_napi_add_tx(netdev, &tx_ring->napi, qlcnic_83xx_msix_tx_poll); } } return 0; } void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter) { int ring; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct qlcnic_host_tx_ring *tx_ring; for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; netif_napi_del(&sds_ring->napi); } qlcnic_free_sds_rings(adapter->recv_ctx); if ((adapter->flags & QLCNIC_MSIX_ENABLED) && !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; netif_napi_del(&tx_ring->napi); } } qlcnic_free_tx_rings(adapter); } static void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter, int ring, u64 sts_data[]) { struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct sk_buff *skb; struct qlcnic_host_rds_ring *rds_ring; int index, length; if (unlikely(ring >= adapter->max_rds_rings)) return; rds_ring = &recv_ctx->rds_rings[ring]; index = qlcnic_83xx_hndl(sts_data[0]); if (unlikely(index >= rds_ring->num_desc)) return; length = qlcnic_83xx_pktln(sts_data[0]); skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK); if (!skb) return; if (length > rds_ring->skb_size) skb_put(skb, rds_ring->skb_size); else skb_put(skb, length); if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr)) adapter->ahw->diag_cnt++; else dump_skb(skb, adapter); dev_kfree_skb_any(skb); return; } void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring) { struct qlcnic_adapter *adapter = sds_ring->adapter; struct status_desc *desc; u64 sts_data[2]; int ring, opcode; u32 consumer = sds_ring->consumer; desc = &sds_ring->desc_head[consumer]; sts_data[0] = le64_to_cpu(desc->status_desc_data[0]); sts_data[1] = le64_to_cpu(desc->status_desc_data[1]); opcode = qlcnic_83xx_opcode(sts_data[1]); if (!opcode) return; ring = QLCNIC_FETCH_RING_ID(sts_data[0]); qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data); desc = &sds_ring->desc_head[consumer]; desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM); consumer = get_next_index(consumer, sds_ring->num_desc); sds_ring->consumer = consumer; writel(consumer, sds_ring->crb_sts_consumer); }
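/*
 * Illustrative sketch, not part of the original qlcnic sources: every poll
 * handler in this file follows the same NAPI budget contract.  A minimal,
 * driver-agnostic version is shown below; my_process_rx() and
 * my_enable_irq() are hypothetical placeholders for a driver's own
 * receive-processing and interrupt-enable helpers.
 */
static int my_poll(struct napi_struct *napi, int budget)
{
	/* Consume at most 'budget' packets from the receive ring. */
	int work_done = my_process_rx(napi, budget);

	if (work_done < budget) {
		/* Ring drained: leave polling mode and re-arm the device IRQ. */
		napi_complete_done(napi, work_done);
		my_enable_irq(napi);
	}

	/* Returning 'budget' keeps this NAPI instance scheduled for a repoll. */
	return work_done;
}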
linux-master
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic qlcnic NIC Driver * Copyright (c) 2009-2013 QLogic Corporation */ #include <linux/slab.h> #include <net/ip.h> #include <linux/bitops.h> #include "qlcnic.h" #include "qlcnic_hdr.h" #define MASK(n) ((1ULL<<(n))-1) #define OCM_WIN_P3P(addr) (addr & 0xffc0000) #define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) #define CRB_BLK(off) ((off >> 20) & 0x3f) #define CRB_SUBBLK(off) ((off >> 16) & 0xf) #define CRB_WINDOW_2M (0x130060) #define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000)) #define CRB_INDIRECT_2M (0x1e0000UL) struct qlcnic_ms_reg_ctrl { u32 ocm_window; u32 control; u32 hi; u32 low; u32 rd[4]; u32 wd[4]; u64 off; }; #ifndef readq static inline u64 readq(void __iomem *addr) { return readl(addr) | (((u64) readl(addr + 4)) << 32LL); } #endif #ifndef writeq static inline void writeq(u64 val, void __iomem *addr) { writel(((u32) (val)), (addr)); writel(((u32) (val >> 32)), (addr + 4)); } #endif static struct crb_128M_2M_block_map crb_128M_2M_map[64] __cacheline_aligned_in_smp = { {{{0, 0, 0, 0} } }, /* 0: PCI */ {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */ {1, 0x0110000, 0x0120000, 0x130000}, {1, 0x0120000, 0x0122000, 0x124000}, {1, 0x0130000, 0x0132000, 0x126000}, {1, 0x0140000, 0x0142000, 0x128000}, {1, 0x0150000, 0x0152000, 0x12a000}, {1, 0x0160000, 0x0170000, 0x110000}, {1, 0x0170000, 0x0172000, 0x12e000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x01e0000, 0x01e0800, 0x122000}, {0, 0x0000000, 0x0000000, 0x000000} } }, {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */ {{{0, 0, 0, 0} } }, /* 3: */ {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */ {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */ {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x08f0000, 0x08f2000, 0x172000} } }, {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x09f0000, 0x09f2000, 0x176000} } }, {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 
0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x0af0000, 0x0af2000, 0x17a000} } }, {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */ {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */ {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */ {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */ {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */ {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */ {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */ {{{0, 0, 0, 0} } }, /* 23: */ {{{0, 0, 0, 0} } }, /* 24: */ {{{0, 0, 0, 0} } }, /* 25: */ {{{0, 0, 0, 0} } }, /* 26: */ {{{0, 0, 0, 0} } }, /* 27: */ {{{0, 0, 0, 0} } }, /* 28: */ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */ {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */ {{{0} } }, /* 32: PCI */ {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */ {1, 0x2110000, 0x2120000, 0x130000}, {1, 0x2120000, 0x2122000, 0x124000}, {1, 0x2130000, 0x2132000, 0x126000}, {1, 0x2140000, 0x2142000, 0x128000}, {1, 0x2150000, 0x2152000, 0x12a000}, {1, 0x2160000, 0x2170000, 0x110000}, {1, 0x2170000, 0x2172000, 0x12e000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000} } }, {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */ {{{0} } }, /* 35: */ {{{0} } }, /* 36: */ {{{0} } }, /* 37: */ {{{0} } }, /* 38: */ {{{0} } }, /* 39: */ {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */ {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */ {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */ {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */ {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */ {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */ {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */ {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */ {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */ {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */ {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */ {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */ {{{0} } }, /* 52: */ {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */ {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */ {{{1, 0x3700000, 0x3700400, 0x1ae400} } 
},/* 55: RPMX9 */ {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */ {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */ {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */ {{{0} } }, /* 59: I2C0 */ {{{0} } }, /* 60: I2C1 */ {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */ {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */ {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */ }; /* * top 12 bits of crb internal address (hub, agent) */ static const unsigned crb_hub_agt[64] = { 0, QLCNIC_HW_CRB_HUB_AGT_ADR_PS, QLCNIC_HW_CRB_HUB_AGT_ADR_MN, QLCNIC_HW_CRB_HUB_AGT_ADR_MS, 0, QLCNIC_HW_CRB_HUB_AGT_ADR_SRE, QLCNIC_HW_CRB_HUB_AGT_ADR_NIU, QLCNIC_HW_CRB_HUB_AGT_ADR_QMN, QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0, QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1, QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2, QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3, QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q, QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR, QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB, QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4, QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA, QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0, QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1, QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2, QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3, QLCNIC_HW_CRB_HUB_AGT_ADR_PGND, QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI, QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0, QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1, QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2, QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3, 0, QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI, QLCNIC_HW_CRB_HUB_AGT_ADR_SN, 0, QLCNIC_HW_CRB_HUB_AGT_ADR_EG, 0, QLCNIC_HW_CRB_HUB_AGT_ADR_PS, QLCNIC_HW_CRB_HUB_AGT_ADR_CAM, 0, 0, 0, 0, 0, QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR, 0, QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1, QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2, QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3, QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4, QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5, QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6, QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7, QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA, QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q, QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB, 0, QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0, QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8, QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9, QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0, 0, QLCNIC_HW_CRB_HUB_AGT_ADR_SMB, QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0, QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1, 0, QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC, 0, }; /* PCI Windowing for DDR regions. 
*/ #define QLCNIC_PCIE_SEM_TIMEOUT 10000 static void qlcnic_read_window_reg(u32 addr, void __iomem *bar0, u32 *data) { u32 dest; void __iomem *val; dest = addr & 0xFFFF0000; val = bar0 + QLCNIC_FW_DUMP_REG1; writel(dest, val); readl(val); val = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr); *data = readl(val); } static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data) { u32 dest; void __iomem *val; dest = addr & 0xFFFF0000; val = bar0 + QLCNIC_FW_DUMP_REG1; writel(dest, val); readl(val); val = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr); writel(data, val); readl(val); } int qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) { int timeout = 0, err = 0, done = 0; while (!done) { done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)), &err); if (done == 1) break; if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) { if (id_reg) { done = QLCRD32(adapter, id_reg, &err); if (done != -1) dev_err(&adapter->pdev->dev, "Failed to acquire sem=%d lock held by=%d\n", sem, done); else dev_err(&adapter->pdev->dev, "Failed to acquire sem=%d lock", sem); } else { dev_err(&adapter->pdev->dev, "Failed to acquire sem=%d lock", sem); } return -EIO; } udelay(1200); } if (id_reg) QLCWR32(adapter, id_reg, adapter->portnum); return 0; } void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem) { int err = 0; QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)), &err); } int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr) { int err = 0; u32 data; if (qlcnic_82xx_check(adapter)) qlcnic_read_window_reg(addr, adapter->ahw->pci_base0, &data); else { data = QLCRD32(adapter, addr, &err); if (err == -EIO) return err; } return data; } int qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data) { int ret = 0; if (qlcnic_82xx_check(adapter)) qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data); else ret = qlcnic_83xx_wrt_reg_indirect(adapter, addr, data); return ret; } static int qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter, struct cmd_desc_type0 *cmd_desc_arr, int nr_desc) { u32 i, producer; struct qlcnic_cmd_buffer *pbuf; struct cmd_desc_type0 *cmd_desc; struct qlcnic_host_tx_ring *tx_ring; i = 0; if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) return -EIO; tx_ring = &adapter->tx_ring[0]; __netif_tx_lock_bh(tx_ring->txq); producer = tx_ring->producer; if (nr_desc >= qlcnic_tx_avail(tx_ring)) { netif_tx_stop_queue(tx_ring->txq); smp_mb(); if (qlcnic_tx_avail(tx_ring) > nr_desc) { if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) netif_tx_wake_queue(tx_ring->txq); } else { adapter->stats.xmit_off++; __netif_tx_unlock_bh(tx_ring->txq); return -EBUSY; } } do { cmd_desc = &cmd_desc_arr[i]; pbuf = &tx_ring->cmd_buf_arr[producer]; pbuf->skb = NULL; pbuf->frag_count = 0; memcpy(&tx_ring->desc_head[producer], cmd_desc, sizeof(struct cmd_desc_type0)); producer = get_next_index(producer, tx_ring->num_desc); i++; } while (i != nr_desc); tx_ring->producer = producer; qlcnic_update_cmd_producer(tx_ring); __netif_tx_unlock_bh(tx_ring->txq); return 0; } int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, u16 vlan_id, u8 op) { struct qlcnic_nic_req req; struct qlcnic_mac_req *mac_req; struct qlcnic_vlan_req *vlan_req; u64 word; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23); word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); mac_req = (struct qlcnic_mac_req *)&req.words[0]; mac_req->op = op; memcpy(mac_req->mac_addr, addr, ETH_ALEN); vlan_req = (struct 
qlcnic_vlan_req *)&req.words[1]; vlan_req->vlan_id = cpu_to_le16(vlan_id); return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); } int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr) { struct qlcnic_mac_vlan_list *cur; int err = -EINVAL; /* Delete MAC from the existing list */ list_for_each_entry(cur, &adapter->mac_list, list) { if (ether_addr_equal(addr, cur->mac_addr)) { err = qlcnic_sre_macaddr_change(adapter, cur->mac_addr, 0, QLCNIC_MAC_DEL); if (err) return err; list_del(&cur->list); kfree(cur); return err; } } return err; } int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan, enum qlcnic_mac_type mac_type) { struct qlcnic_mac_vlan_list *cur; /* look up if already exists */ list_for_each_entry(cur, &adapter->mac_list, list) { if (ether_addr_equal(addr, cur->mac_addr) && cur->vlan_id == vlan) return 0; } cur = kzalloc(sizeof(*cur), GFP_ATOMIC); if (cur == NULL) return -ENOMEM; memcpy(cur->mac_addr, addr, ETH_ALEN); if (qlcnic_sre_macaddr_change(adapter, cur->mac_addr, vlan, QLCNIC_MAC_ADD)) { kfree(cur); return -EIO; } cur->vlan_id = vlan; cur->mac_type = mac_type; list_add_tail(&cur->list, &adapter->mac_list); return 0; } void qlcnic_flush_mcast_mac(struct qlcnic_adapter *adapter) { struct qlcnic_mac_vlan_list *cur; struct list_head *head, *tmp; list_for_each_safe(head, tmp, &adapter->mac_list) { cur = list_entry(head, struct qlcnic_mac_vlan_list, list); if (cur->mac_type != QLCNIC_MULTICAST_MAC) continue; qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id, QLCNIC_MAC_DEL); list_del(&cur->list); kfree(cur); } } static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_hardware_context *ahw = adapter->ahw; struct netdev_hw_addr *ha; static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; u32 mode = VPORT_MISS_MODE_DROP; if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) return; qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan, QLCNIC_UNICAST_MAC); qlcnic_nic_add_mac(adapter, bcast_addr, vlan, QLCNIC_BROADCAST_MAC); if (netdev->flags & IFF_PROMISC) { if (!(adapter->flags & QLCNIC_PROMISC_DISABLED)) mode = VPORT_MISS_MODE_ACCEPT_ALL; } else if ((netdev->flags & IFF_ALLMULTI) || (netdev_mc_count(netdev) > ahw->max_mc_count)) { mode = VPORT_MISS_MODE_ACCEPT_MULTI; } else if (!netdev_mc_empty(netdev)) { qlcnic_flush_mcast_mac(adapter); netdev_for_each_mc_addr(ha, netdev) qlcnic_nic_add_mac(adapter, ha->addr, vlan, QLCNIC_MULTICAST_MAC); } /* configure unicast MAC address, if there is not sufficient space * to store all the unicast addresses then enable promiscuous mode */ if (netdev_uc_count(netdev) > ahw->max_uc_count) { mode = VPORT_MISS_MODE_ACCEPT_ALL; } else if (!netdev_uc_empty(netdev)) { netdev_for_each_uc_addr(ha, netdev) qlcnic_nic_add_mac(adapter, ha->addr, vlan, QLCNIC_UNICAST_MAC); } if (mode == VPORT_MISS_MODE_ACCEPT_ALL && !adapter->fdb_mac_learn) { qlcnic_alloc_lb_filters_mem(adapter); adapter->drv_mac_learn = 1; if (adapter->flags & QLCNIC_ESWITCH_ENABLED) adapter->rx_mac_learn = true; } else { adapter->drv_mac_learn = 0; adapter->rx_mac_learn = false; } qlcnic_nic_set_promisc(adapter, mode); } void qlcnic_set_multi(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) return; if (qlcnic_sriov_vf_check(adapter)) qlcnic_sriov_vf_set_multi(netdev); else __qlcnic_set_multi(netdev, 0); } int 
qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) { struct qlcnic_nic_req req; u64 word; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(mode); return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); } void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter) { struct list_head *head = &adapter->mac_list; struct qlcnic_mac_vlan_list *cur; while (!list_empty(head)) { cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list); qlcnic_sre_macaddr_change(adapter, cur->mac_addr, 0, QLCNIC_MAC_DEL); list_del(&cur->list); kfree(cur); } } void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter) { struct qlcnic_filter *tmp_fil; struct hlist_node *n; struct hlist_head *head; int i; unsigned long expires; u8 cmd; for (i = 0; i < adapter->fhash.fbucket_size; i++) { head = &(adapter->fhash.fhead[i]); hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL; expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ; if (time_before(expires, jiffies)) { qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr, tmp_fil->vlan_id, cmd); spin_lock_bh(&adapter->mac_learn_lock); adapter->fhash.fnum--; hlist_del(&tmp_fil->fnode); spin_unlock_bh(&adapter->mac_learn_lock); kfree(tmp_fil); } } } for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) { head = &(adapter->rx_fhash.fhead[i]); hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ; if (time_before(expires, jiffies)) { spin_lock_bh(&adapter->rx_mac_learn_lock); adapter->rx_fhash.fnum--; hlist_del(&tmp_fil->fnode); spin_unlock_bh(&adapter->rx_mac_learn_lock); kfree(tmp_fil); } } } } void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter) { struct qlcnic_filter *tmp_fil; struct hlist_node *n; struct hlist_head *head; int i; u8 cmd; for (i = 0; i < adapter->fhash.fbucket_size; i++) { head = &(adapter->fhash.fhead[i]); hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL; qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr, tmp_fil->vlan_id, cmd); spin_lock_bh(&adapter->mac_learn_lock); adapter->fhash.fnum--; hlist_del(&tmp_fil->fnode); spin_unlock_bh(&adapter->mac_learn_lock); kfree(tmp_fil); } } } static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag) { struct qlcnic_nic_req req; int rv; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK | ((u64) adapter->portnum << 16) | ((u64) 0x1 << 32)); req.words[0] = cpu_to_le64(flag); rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) dev_err(&adapter->pdev->dev, "%sting loopback mode failed\n", flag ? 
"Set" : "Reset"); return rv; } int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) { if (qlcnic_set_fw_loopback(adapter, mode)) return -EIO; if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) { qlcnic_set_fw_loopback(adapter, 0); return -EIO; } msleep(1000); return 0; } int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) { struct net_device *netdev = adapter->netdev; mode = VPORT_MISS_MODE_DROP; qlcnic_set_fw_loopback(adapter, 0); if (netdev->flags & IFF_PROMISC) mode = VPORT_MISS_MODE_ACCEPT_ALL; else if (netdev->flags & IFF_ALLMULTI) mode = VPORT_MISS_MODE_ACCEPT_MULTI; qlcnic_nic_set_promisc(adapter, mode); msleep(1000); return 0; } int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *adapter) { u8 mac[ETH_ALEN]; int ret; ret = qlcnic_get_mac_address(adapter, mac, adapter->ahw->physical_port); if (ret) return ret; memcpy(adapter->ahw->phys_port_id, mac, ETH_ALEN); adapter->flags |= QLCNIC_HAS_PHYS_PORT_ID; return 0; } int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *adapter) { struct qlcnic_nic_req req; int rv; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE | ((u64) adapter->portnum << 16)); req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32); req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets | ((u64) adapter->ahw->coal.rx_time_us) << 16); req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out | ((u64) adapter->ahw->coal.type) << 32 | ((u64) adapter->ahw->coal.sts_ring_mask) << 40); rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) dev_err(&adapter->netdev->dev, "Could not send interrupt coalescing parameters\n"); return rv; } /* Send the interrupt coalescing parameter set by ethtool to the card. 
*/ int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter, struct ethtool_coalesce *ethcoal) { struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal; int rv; coal->flag = QLCNIC_INTR_DEFAULT; coal->rx_time_us = ethcoal->rx_coalesce_usecs; coal->rx_packets = ethcoal->rx_max_coalesced_frames; rv = qlcnic_82xx_set_rx_coalesce(adapter); if (rv) netdev_err(adapter->netdev, "Failed to set Rx coalescing parameters\n"); return rv; } #define QLCNIC_ENABLE_IPV4_LRO BIT_0 #define QLCNIC_ENABLE_IPV6_LRO (BIT_1 | BIT_9) int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable) { struct qlcnic_nic_req req; u64 word; int rv; if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) return 0; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); word = 0; if (enable) { word = QLCNIC_ENABLE_IPV4_LRO; if (adapter->ahw->extra_capability[0] & QLCNIC_FW_CAP2_HW_LRO_IPV6) word |= QLCNIC_ENABLE_IPV6_LRO; } req.words[0] = cpu_to_le64(word); rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) dev_err(&adapter->netdev->dev, "Could not send configure hw lro request\n"); return rv; } int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) { struct qlcnic_nic_req req; u64 word; int rv; if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable) return 0; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(enable); rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) dev_err(&adapter->netdev->dev, "Could not send configure bridge mode request\n"); adapter->flags ^= QLCNIC_BRIDGE_ENABLED; return rv; } #define QLCNIC_RSS_HASHTYPE_IP_TCP 0x3 #define QLCNIC_ENABLE_TYPE_C_RSS BIT_10 #define QLCNIC_RSS_FEATURE_FLAG (1ULL << 63) #define QLCNIC_RSS_IND_TABLE_MASK 0x7ULL int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int enable) { struct qlcnic_nic_req req; u64 word; int i, rv; static const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, 0x255b0ec26d5a56daULL }; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); /* * RSS request: * bits 3-0: hash_method * 5-4: hash_type_ipv4 * 7-6: hash_type_ipv6 * 8: enable * 9: use indirection table * 10: type-c rss * 11: udp rss * 47-12: reserved * 62-48: indirection table mask * 63: feature flag */ word = ((u64)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 4) | ((u64)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 6) | ((u64)(enable & 0x1) << 8) | ((u64)QLCNIC_RSS_IND_TABLE_MASK << 48) | (u64)QLCNIC_ENABLE_TYPE_C_RSS | (u64)QLCNIC_RSS_FEATURE_FLAG; req.words[0] = cpu_to_le64(word); for (i = 0; i < 5; i++) req.words[i+1] = cpu_to_le64(key[i]); rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) dev_err(&adapter->netdev->dev, "could not configure RSS\n"); return rv; } void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd) { struct qlcnic_nic_req req; struct qlcnic_ipaddr *ipa; u64 word; int rv; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); word = 
QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(cmd); ipa = (struct qlcnic_ipaddr *)&req.words[1]; ipa->ipv4 = ip; rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) dev_err(&adapter->netdev->dev, "could not notify %s IP 0x%x request\n", (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip); } int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int enable) { struct qlcnic_nic_req req; u64 word; int rv; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(enable | (enable << 8)); rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) dev_err(&adapter->netdev->dev, "could not configure link notification\n"); return rv; } static int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter) { struct qlcnic_nic_req req; u64 word; int rv; if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) return 0; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); word = QLCNIC_H2C_OPCODE_LRO_REQUEST | ((u64)adapter->portnum << 16) | ((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56) ; req.req_hdr = cpu_to_le64(word); rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) dev_err(&adapter->netdev->dev, "could not cleanup lro flows\n"); return rv; } /* * qlcnic_change_mtu - Change the Maximum Transfer Unit * @returns 0 on success, negative on failure */ int qlcnic_change_mtu(struct net_device *netdev, int mtu) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int rc = 0; rc = qlcnic_fw_cmd_set_mtu(adapter, mtu); if (!rc) netdev->mtu = mtu; return rc; } static netdev_features_t qlcnic_process_flags(struct qlcnic_adapter *adapter, netdev_features_t features) { u32 offload_flags = adapter->offload_flags; if (offload_flags & BIT_0) { features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; adapter->rx_csum = 1; if (QLCNIC_IS_TSO_CAPABLE(adapter)) { if (!(offload_flags & BIT_1)) features &= ~NETIF_F_TSO; else features |= NETIF_F_TSO; if (!(offload_flags & BIT_2)) features &= ~NETIF_F_TSO6; else features |= NETIF_F_TSO6; } } else { features &= ~(NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); if (QLCNIC_IS_TSO_CAPABLE(adapter)) features &= ~(NETIF_F_TSO | NETIF_F_TSO6); adapter->rx_csum = 0; } return features; } netdev_features_t qlcnic_fix_features(struct net_device *netdev, netdev_features_t features) { struct qlcnic_adapter *adapter = netdev_priv(netdev); netdev_features_t changed; if (qlcnic_82xx_check(adapter) && (adapter->flags & QLCNIC_ESWITCH_ENABLED)) { if (adapter->flags & QLCNIC_APP_CHANGED_FLAGS) { features = qlcnic_process_flags(adapter, features); } else { changed = features ^ netdev->features; features ^= changed & (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6); } } if (!(features & NETIF_F_RXCSUM)) features &= ~NETIF_F_LRO; return features; } int qlcnic_set_features(struct net_device *netdev, netdev_features_t features) { struct qlcnic_adapter *adapter = netdev_priv(netdev); netdev_features_t changed = netdev->features ^ features; int hw_lro = (features & NETIF_F_LRO) ? 
QLCNIC_LRO_ENABLED : 0; if (!(changed & NETIF_F_LRO)) return 0; netdev->features ^= NETIF_F_LRO; if (qlcnic_config_hw_lro(adapter, hw_lro)) return -EIO; if (!hw_lro && qlcnic_82xx_check(adapter)) { if (qlcnic_send_lro_cleanup(adapter)) return -EIO; } return 0; } /* * Changes the CRB window to the specified window. */ /* Returns < 0 if off is not valid, * 1 if window access is needed. 'off' is set to offset from * CRB space in 128M pci map * 0 if no window access is needed. 'off' is set to 2M addr * In: 'off' is offset from base in 128M pci map */ static int qlcnic_pci_get_crb_addr_2M(struct qlcnic_hardware_context *ahw, ulong off, void __iomem **addr) { const struct crb_128M_2M_sub_block_map *m; if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE)) return -EINVAL; off -= QLCNIC_PCI_CRBSPACE; /* * Try direct map */ m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)]; if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) { *addr = ahw->pci_base0 + m->start_2M + (off - m->start_128M); return 0; } /* * Not in direct map, use crb window */ *addr = ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16)); return 1; } /* * In: 'off' is offset from CRB space in 128M pci map * Out: 'off' is 2M pci map addr * side effect: lock crb window */ static int qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off) { u32 window; void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M; off -= QLCNIC_PCI_CRBSPACE; window = CRB_HI(off); if (window == 0) { dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off); return -EIO; } writel(window, addr); if (readl(addr) != window) { if (printk_ratelimit()) dev_warn(&adapter->pdev->dev, "failed to set CRB window to %d off 0x%lx\n", window, off); return -EIO; } return 0; } int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data) { unsigned long flags; int rv; void __iomem *addr = NULL; rv = qlcnic_pci_get_crb_addr_2M(adapter->ahw, off, &addr); if (rv == 0) { writel(data, addr); return 0; } if (rv > 0) { /* indirect access */ write_lock_irqsave(&adapter->ahw->crb_lock, flags); crb_win_lock(adapter); rv = qlcnic_pci_set_crbwindow_2M(adapter, off); if (!rv) writel(data, addr); crb_win_unlock(adapter); write_unlock_irqrestore(&adapter->ahw->crb_lock, flags); return rv; } dev_err(&adapter->pdev->dev, "%s: invalid offset: 0x%016lx\n", __func__, off); dump_stack(); return -EIO; } int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off, int *err) { unsigned long flags; int rv; u32 data = -1; void __iomem *addr = NULL; rv = qlcnic_pci_get_crb_addr_2M(adapter->ahw, off, &addr); if (rv == 0) return readl(addr); if (rv > 0) { /* indirect access */ write_lock_irqsave(&adapter->ahw->crb_lock, flags); crb_win_lock(adapter); if (!qlcnic_pci_set_crbwindow_2M(adapter, off)) data = readl(addr); crb_win_unlock(adapter); write_unlock_irqrestore(&adapter->ahw->crb_lock, flags); return data; } dev_err(&adapter->pdev->dev, "%s: invalid offset: 0x%016lx\n", __func__, off); dump_stack(); return -1; } void __iomem *qlcnic_get_ioaddr(struct qlcnic_hardware_context *ahw, u32 offset) { void __iomem *addr = NULL; WARN_ON(qlcnic_pci_get_crb_addr_2M(ahw, offset, &addr)); return addr; } static int qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u32 window, u64 off, u64 *data, int op) { void __iomem *addr; u32 start; mutex_lock(&adapter->ahw->mem_lock); writel(window, adapter->ahw->ocm_win_crb); /* read back to flush */ readl(adapter->ahw->ocm_win_crb); start = QLCNIC_PCI_OCM0_2M + off; addr = 
adapter->ahw->pci_base0 + start; if (op == 0) /* read */ *data = readq(addr); else /* write */ writeq(*data, addr); /* Set window to 0 */ writel(0, adapter->ahw->ocm_win_crb); readl(adapter->ahw->ocm_win_crb); mutex_unlock(&adapter->ahw->mem_lock); return 0; } static void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data) { void __iomem *addr = adapter->ahw->pci_base0 + QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM); mutex_lock(&adapter->ahw->mem_lock); *data = readq(addr); mutex_unlock(&adapter->ahw->mem_lock); } static void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data) { void __iomem *addr = adapter->ahw->pci_base0 + QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM); mutex_lock(&adapter->ahw->mem_lock); writeq(data, addr); mutex_unlock(&adapter->ahw->mem_lock); } /* Set MS memory control data for different adapters */ static void qlcnic_set_ms_controls(struct qlcnic_adapter *adapter, u64 off, struct qlcnic_ms_reg_ctrl *ms) { ms->control = QLCNIC_MS_CTRL; ms->low = QLCNIC_MS_ADDR_LO; ms->hi = QLCNIC_MS_ADDR_HI; if (off & 0xf) { ms->wd[0] = QLCNIC_MS_WRTDATA_LO; ms->rd[0] = QLCNIC_MS_RDDATA_LO; ms->wd[1] = QLCNIC_MS_WRTDATA_HI; ms->rd[1] = QLCNIC_MS_RDDATA_HI; ms->wd[2] = QLCNIC_MS_WRTDATA_ULO; ms->wd[3] = QLCNIC_MS_WRTDATA_UHI; ms->rd[2] = QLCNIC_MS_RDDATA_ULO; ms->rd[3] = QLCNIC_MS_RDDATA_UHI; } else { ms->wd[0] = QLCNIC_MS_WRTDATA_ULO; ms->rd[0] = QLCNIC_MS_RDDATA_ULO; ms->wd[1] = QLCNIC_MS_WRTDATA_UHI; ms->rd[1] = QLCNIC_MS_RDDATA_UHI; ms->wd[2] = QLCNIC_MS_WRTDATA_LO; ms->wd[3] = QLCNIC_MS_WRTDATA_HI; ms->rd[2] = QLCNIC_MS_RDDATA_LO; ms->rd[3] = QLCNIC_MS_RDDATA_HI; } ms->ocm_window = OCM_WIN_P3P(off); ms->off = GET_MEM_OFFS_2M(off); } int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data) { int j, ret = 0; u32 temp, off8; struct qlcnic_ms_reg_ctrl ms; /* Only 64-bit aligned access */ if (off & 7) return -EIO; memset(&ms, 0, sizeof(struct qlcnic_ms_reg_ctrl)); if (!(ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, QLCNIC_ADDR_QDR_NET_MAX) || ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX))) return -EIO; qlcnic_set_ms_controls(adapter, off, &ms); if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) return qlcnic_pci_mem_access_direct(adapter, ms.ocm_window, ms.off, &data, 1); off8 = off & ~0xf; mutex_lock(&adapter->ahw->mem_lock); qlcnic_ind_wr(adapter, ms.low, off8); qlcnic_ind_wr(adapter, ms.hi, 0); qlcnic_ind_wr(adapter, ms.control, TA_CTL_ENABLE); qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_START_ENABLE); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = qlcnic_ind_rd(adapter, ms.control); if ((temp & TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { ret = -EIO; goto done; } /* This is the modify part of read-modify-write */ qlcnic_ind_wr(adapter, ms.wd[0], qlcnic_ind_rd(adapter, ms.rd[0])); qlcnic_ind_wr(adapter, ms.wd[1], qlcnic_ind_rd(adapter, ms.rd[1])); /* This is the write part of read-modify-write */ qlcnic_ind_wr(adapter, ms.wd[2], data & 0xffffffff); qlcnic_ind_wr(adapter, ms.wd[3], (data >> 32) & 0xffffffff); qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_WRITE_ENABLE); qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_WRITE_START); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = qlcnic_ind_rd(adapter, ms.control); if ((temp & TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) dev_err(&adapter->pdev->dev, "failed to write through agent\n"); ret = -EIO; } else ret = 0; done: mutex_unlock(&adapter->ahw->mem_lock); return ret; } int 
qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data) { int j, ret; u32 temp, off8; u64 val; struct qlcnic_ms_reg_ctrl ms; /* Only 64-bit aligned access */ if (off & 7) return -EIO; if (!(ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, QLCNIC_ADDR_QDR_NET_MAX) || ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX))) return -EIO; memset(&ms, 0, sizeof(struct qlcnic_ms_reg_ctrl)); qlcnic_set_ms_controls(adapter, off, &ms); if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) return qlcnic_pci_mem_access_direct(adapter, ms.ocm_window, ms.off, data, 0); mutex_lock(&adapter->ahw->mem_lock); off8 = off & ~0xf; qlcnic_ind_wr(adapter, ms.low, off8); qlcnic_ind_wr(adapter, ms.hi, 0); qlcnic_ind_wr(adapter, ms.control, TA_CTL_ENABLE); qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_START_ENABLE); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = qlcnic_ind_rd(adapter, ms.control); if ((temp & TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) dev_err(&adapter->pdev->dev, "failed to read through agent\n"); ret = -EIO; } else { temp = qlcnic_ind_rd(adapter, ms.rd[3]); val = (u64)temp << 32; val |= qlcnic_ind_rd(adapter, ms.rd[2]); *data = val; ret = 0; } mutex_unlock(&adapter->ahw->mem_lock); return ret; } int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter) { int offset, board_type, magic, err = 0; struct pci_dev *pdev = adapter->pdev; offset = QLCNIC_FW_MAGIC_OFFSET; if (qlcnic_rom_fast_read(adapter, offset, &magic)) return -EIO; if (magic != QLCNIC_BDINFO_MAGIC) { dev_err(&pdev->dev, "invalid board config, magic=%08x\n", magic); return -EIO; } offset = QLCNIC_BRDTYPE_OFFSET; if (qlcnic_rom_fast_read(adapter, offset, &board_type)) return -EIO; adapter->ahw->board_type = board_type; if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) { u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I, &err); if (err == -EIO) return err; if ((gpio & 0x8000) == 0) board_type = QLCNIC_BRDTYPE_P3P_10G_TP; } switch (board_type) { case QLCNIC_BRDTYPE_P3P_HMEZ: case QLCNIC_BRDTYPE_P3P_XG_LOM: case QLCNIC_BRDTYPE_P3P_10G_CX4: case QLCNIC_BRDTYPE_P3P_10G_CX4_LP: case QLCNIC_BRDTYPE_P3P_IMEZ: case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS: case QLCNIC_BRDTYPE_P3P_10G_SFP_CT: case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: case QLCNIC_BRDTYPE_P3P_10G_XFP: case QLCNIC_BRDTYPE_P3P_10000_BASE_T: adapter->ahw->port_type = QLCNIC_XGBE; break; case QLCNIC_BRDTYPE_P3P_REF_QG: case QLCNIC_BRDTYPE_P3P_4_GB: case QLCNIC_BRDTYPE_P3P_4_GB_MM: adapter->ahw->port_type = QLCNIC_GBE; break; case QLCNIC_BRDTYPE_P3P_10G_TP: adapter->ahw->port_type = (adapter->portnum < 2) ? 
QLCNIC_XGBE : QLCNIC_GBE; break; default: dev_err(&pdev->dev, "unknown board type %x\n", board_type); adapter->ahw->port_type = QLCNIC_XGBE; break; } return 0; } static int qlcnic_wol_supported(struct qlcnic_adapter *adapter) { u32 wol_cfg; int err = 0; wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err); if (wol_cfg & (1UL << adapter->portnum)) { wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err); if (err == -EIO) return err; if (wol_cfg & (1 << adapter->portnum)) return 1; } return 0; } int qlcnic_82xx_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate) { struct qlcnic_nic_req req; int rv; u64 word; memset(&req, 0, sizeof(struct qlcnic_nic_req)); req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(((u64)rate << 32) | adapter->portnum); req.words[1] = cpu_to_le64(state); rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv) dev_err(&adapter->pdev->dev, "LED configuration failed.\n"); return rv; } void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_cmd_args cmd; u8 beacon_state; int err = 0; if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) { err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_STATUS); if (!err) { err = qlcnic_issue_cmd(adapter, &cmd); if (err) { netdev_err(adapter->netdev, "Failed to get current beacon state, err=%d\n", err); } else { beacon_state = cmd.rsp.arg[1]; if (beacon_state == QLCNIC_BEACON_DISABLE) ahw->beacon_state = QLCNIC_BEACON_OFF; else if (beacon_state == QLCNIC_BEACON_EANBLE) ahw->beacon_state = QLCNIC_BEACON_ON; } } qlcnic_free_mbx_args(&cmd); } return; } void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter) { void __iomem *msix_base_addr; u32 func; u32 msix_base; pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func); msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE; msix_base = readl(msix_base_addr); func = (func - msix_base) / QLCNIC_MSIX_TBL_PGSIZE; adapter->ahw->pci_func = func; } void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf, loff_t offset, size_t size) { int err = 0; u32 data; u64 qmdata; if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) { qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata); memcpy(buf, &qmdata, size); } else { data = QLCRD32(adapter, offset, &err); memcpy(buf, &data, size); } } void qlcnic_82xx_write_crb(struct qlcnic_adapter *adapter, char *buf, loff_t offset, size_t size) { u32 data; u64 qmdata; if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) { memcpy(&qmdata, buf, size); qlcnic_pci_camqm_write_2M(adapter, offset, qmdata); } else { memcpy(&data, buf, size); QLCWR32(adapter, offset, data); } } int qlcnic_82xx_api_lock(struct qlcnic_adapter *adapter) { return qlcnic_pcie_sem_lock(adapter, 5, 0); } void qlcnic_82xx_api_unlock(struct qlcnic_adapter *adapter) { qlcnic_pcie_sem_unlock(adapter, 5); } int qlcnic_82xx_shutdown(struct pci_dev *pdev) { struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; netif_device_detach(netdev); qlcnic_cancel_idc_work(adapter); if (netif_running(netdev)) qlcnic_down(adapter, netdev); qlcnic_clr_all_drv_state(adapter, 0); clear_bit(__QLCNIC_RESETTING, &adapter->state); if (qlcnic_wol_supported(adapter)) device_wakeup_enable(&pdev->dev); return 0; } int qlcnic_82xx_resume(struct qlcnic_adapter 
*adapter) { struct net_device *netdev = adapter->netdev; int err; err = qlcnic_start_firmware(adapter); if (err) { dev_err(&adapter->pdev->dev, "failed to start firmware\n"); return err; } if (netif_running(netdev)) { err = qlcnic_up(adapter, netdev); if (!err) qlcnic_restore_indev_addr(netdev, NETDEV_UP); } netif_device_attach(netdev); qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); return err; }
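Editor's note: the windowed CRB accessors defined above (qlcnic_pci_get_crb_addr_2M(), qlcnic_pci_set_crbwindow_2M(), qlcnic_82xx_hw_write_wx_2M() and qlcnic_82xx_hw_read_wx_2M()) are normally reached through the QLCRD32()/QLCWR32() wrappers, as seen in qlcnic_82xx_get_board_info() and qlcnic_wol_supported(). The minimal sketch below is an editor-added illustration of that calling convention under those assumptions; it is not part of the driver, and the read/write round trip on QLCNIC_ROMUSB_GLB_PAD_GPIO_I is purely for illustration.

/* Editor-added sketch (not driver code): typical use of the 82xx windowed
 * CRB accessors defined above.  Callers in this file initialize 'err' to 0
 * and check for -EIO after QLCRD32(); writes report failure through the
 * return value (-EIO on an invalid offset or a failed window switch).
 */
static int qlcnic_example_crb_access(struct qlcnic_adapter *adapter)
{
	ulong off = QLCNIC_ROMUSB_GLB_PAD_GPIO_I;	/* example offset only */
	int err = 0;
	u32 val;

	/* Read: direct 2M mapping when possible, CRB window otherwise. */
	val = QLCRD32(adapter, off, &err);
	if (err == -EIO)
		return err;

	/* Write back the same value; illustrates the call shape only. */
	return qlcnic_82xx_hw_write_wx_2M(adapter, off, val);
}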
linux-master
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
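Editor's note: several helpers in qlcnic_hw.c above (qlcnic_82xx_linkevent_request(), qlcnic_send_lro_cleanup(), qlcnic_82xx_config_led()) build a host-to-card request the same way: zero a struct qlcnic_nic_req, set the host-request queue header, encode the opcode and port number into req_hdr, fill words[], and post the descriptor with qlcnic_send_cmd_descs(). The sketch below is an editor-added distillation of that pattern, not driver code; QLCNIC_H2C_OPCODE_EXAMPLE is a hypothetical placeholder opcode, not a real firmware command.

/* Editor-added sketch of the 82xx H2C request pattern used in qlcnic_hw.c
 * above.  The opcode is a hypothetical placeholder for illustration.
 */
static int qlcnic_example_h2c_request(struct qlcnic_adapter *adapter,
				      u64 payload)
{
	struct qlcnic_nic_req req;
	u64 word;
	int rv;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	/* Opcode in the low bits, adapter port number encoded at bit 16. */
	word = QLCNIC_H2C_OPCODE_EXAMPLE | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);
	req.words[0] = cpu_to_le64(payload);

	/* Post a single command descriptor to the firmware. */
	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
			"could not post example H2C request\n");

	return rv;
}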
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic qlcnic NIC Driver * Copyright (c) 2009-2013 QLogic Corporation */ #include <linux/if_vlan.h> #include <linux/ipv6.h> #include <linux/ethtool.h> #include <linux/interrupt.h> #include "qlcnic.h" #include "qlcnic_sriov.h" static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *); static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8); static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8, struct qlcnic_cmd_args *); static int qlcnic_83xx_get_port_config(struct qlcnic_adapter *); static irqreturn_t qlcnic_83xx_handle_aen(int, void *); static pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *, pci_channel_state_t); static int qlcnic_83xx_set_port_config(struct qlcnic_adapter *); static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *); static void qlcnic_83xx_io_resume(struct pci_dev *); static int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *, u8); static void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *); static int qlcnic_83xx_resume(struct qlcnic_adapter *); static int qlcnic_83xx_shutdown(struct pci_dev *); static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *); #define RSS_HASHTYPE_IP_TCP 0x3 #define QLC_83XX_FW_MBX_CMD 0 #define QLC_SKIP_INACTIVE_PCI_REGS 7 #define QLC_MAX_LEGACY_FUNC_SUPP 8 /* 83xx Module type */ #define QLC_83XX_MODULE_FIBRE_10GBASE_LRM 0x1 /* 10GBase-LRM */ #define QLC_83XX_MODULE_FIBRE_10GBASE_LR 0x2 /* 10GBase-LR */ #define QLC_83XX_MODULE_FIBRE_10GBASE_SR 0x3 /* 10GBase-SR */ #define QLC_83XX_MODULE_DA_10GE_PASSIVE_CP 0x4 /* 10GE passive * copper(compliant) */ #define QLC_83XX_MODULE_DA_10GE_ACTIVE_CP 0x5 /* 10GE active limiting * copper(compliant) */ #define QLC_83XX_MODULE_DA_10GE_LEGACY_CP 0x6 /* 10GE passive copper * (legacy, best effort) */ #define QLC_83XX_MODULE_FIBRE_1000BASE_SX 0x7 /* 1000Base-SX */ #define QLC_83XX_MODULE_FIBRE_1000BASE_LX 0x8 /* 1000Base-LX */ #define QLC_83XX_MODULE_FIBRE_1000BASE_CX 0x9 /* 1000Base-CX */ #define QLC_83XX_MODULE_TP_1000BASE_T 0xa /* 1000Base-T*/ #define QLC_83XX_MODULE_DA_1GE_PASSIVE_CP 0xb /* 1GE passive copper * (legacy, best effort) */ #define QLC_83XX_MODULE_UNKNOWN 0xf /* Unknown module type */ /* Port types */ #define QLC_83XX_10_CAPABLE BIT_8 #define QLC_83XX_100_CAPABLE BIT_9 #define QLC_83XX_1G_CAPABLE BIT_10 #define QLC_83XX_10G_CAPABLE BIT_11 #define QLC_83XX_AUTONEG_ENABLE BIT_15 static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1}, {QLCNIC_CMD_CONFIG_INTRPT, 18, 34}, {QLCNIC_CMD_CREATE_RX_CTX, 136, 27}, {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1}, {QLCNIC_CMD_CREATE_TX_CTX, 54, 18}, {QLCNIC_CMD_DESTROY_TX_CTX, 2, 1}, {QLCNIC_CMD_CONFIGURE_MAC_LEARNING, 2, 1}, {QLCNIC_CMD_INTRPT_TEST, 22, 12}, {QLCNIC_CMD_SET_MTU, 3, 1}, {QLCNIC_CMD_READ_PHY, 4, 2}, {QLCNIC_CMD_WRITE_PHY, 5, 1}, {QLCNIC_CMD_READ_HW_REG, 4, 1}, {QLCNIC_CMD_GET_FLOW_CTL, 4, 2}, {QLCNIC_CMD_SET_FLOW_CTL, 4, 1}, {QLCNIC_CMD_READ_MAX_MTU, 4, 2}, {QLCNIC_CMD_READ_MAX_LRO, 4, 2}, {QLCNIC_CMD_MAC_ADDRESS, 4, 3}, {QLCNIC_CMD_GET_PCI_INFO, 1, 129}, {QLCNIC_CMD_GET_NIC_INFO, 2, 19}, {QLCNIC_CMD_SET_NIC_INFO, 32, 1}, {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3}, {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1}, {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3}, {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1}, {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1}, {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3}, {QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1}, {QLCNIC_CMD_CONFIG_PORT, 4, 1}, {QLCNIC_CMD_TEMP_SIZE, 1, 4}, {QLCNIC_CMD_GET_TEMP_HDR, 5, 
5}, {QLCNIC_CMD_GET_LINK_EVENT, 2, 1}, {QLCNIC_CMD_CONFIG_MAC_VLAN, 4, 3}, {QLCNIC_CMD_CONFIG_INTR_COAL, 6, 1}, {QLCNIC_CMD_CONFIGURE_RSS, 14, 1}, {QLCNIC_CMD_CONFIGURE_LED, 2, 1}, {QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, 2, 1}, {QLCNIC_CMD_CONFIGURE_HW_LRO, 2, 1}, {QLCNIC_CMD_GET_STATISTICS, 2, 80}, {QLCNIC_CMD_SET_PORT_CONFIG, 2, 1}, {QLCNIC_CMD_GET_PORT_CONFIG, 2, 2}, {QLCNIC_CMD_GET_LINK_STATUS, 2, 4}, {QLCNIC_CMD_IDC_ACK, 5, 1}, {QLCNIC_CMD_INIT_NIC_FUNC, 3, 1}, {QLCNIC_CMD_STOP_NIC_FUNC, 2, 1}, {QLCNIC_CMD_SET_LED_CONFIG, 5, 1}, {QLCNIC_CMD_GET_LED_CONFIG, 1, 5}, {QLCNIC_CMD_83XX_SET_DRV_VER, 4, 1}, {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26}, {QLCNIC_CMD_CONFIG_VPORT, 4, 4}, {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1}, {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2}, {QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50}, {QLCNIC_CMD_SET_INGRESS_ENCAP, 2, 1}, {QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP, 4, 1}, }; const u32 qlcnic_83xx_ext_reg_tbl[] = { 0x38CC, /* Global Reset */ 0x38F0, /* Wildcard */ 0x38FC, /* Informant */ 0x3038, /* Host MBX ctrl */ 0x303C, /* FW MBX ctrl */ 0x355C, /* BOOT LOADER ADDRESS REG */ 0x3560, /* BOOT LOADER SIZE REG */ 0x3564, /* FW IMAGE ADDR REG */ 0x1000, /* MBX intr enable */ 0x1200, /* Default Intr mask */ 0x1204, /* Default Interrupt ID */ 0x3780, /* QLC_83XX_IDC_MAJ_VERSION */ 0x3784, /* QLC_83XX_IDC_DEV_STATE */ 0x3788, /* QLC_83XX_IDC_DRV_PRESENCE */ 0x378C, /* QLC_83XX_IDC_DRV_ACK */ 0x3790, /* QLC_83XX_IDC_CTRL */ 0x3794, /* QLC_83XX_IDC_DRV_AUDIT */ 0x3798, /* QLC_83XX_IDC_MIN_VERSION */ 0x379C, /* QLC_83XX_RECOVER_DRV_LOCK */ 0x37A0, /* QLC_83XX_IDC_PF_0 */ 0x37A4, /* QLC_83XX_IDC_PF_1 */ 0x37A8, /* QLC_83XX_IDC_PF_2 */ 0x37AC, /* QLC_83XX_IDC_PF_3 */ 0x37B0, /* QLC_83XX_IDC_PF_4 */ 0x37B4, /* QLC_83XX_IDC_PF_5 */ 0x37B8, /* QLC_83XX_IDC_PF_6 */ 0x37BC, /* QLC_83XX_IDC_PF_7 */ 0x37C0, /* QLC_83XX_IDC_PF_8 */ 0x37C4, /* QLC_83XX_IDC_PF_9 */ 0x37C8, /* QLC_83XX_IDC_PF_10 */ 0x37CC, /* QLC_83XX_IDC_PF_11 */ 0x37D0, /* QLC_83XX_IDC_PF_12 */ 0x37D4, /* QLC_83XX_IDC_PF_13 */ 0x37D8, /* QLC_83XX_IDC_PF_14 */ 0x37DC, /* QLC_83XX_IDC_PF_15 */ 0x37E0, /* QLC_83XX_IDC_DEV_PARTITION_INFO_1 */ 0x37E4, /* QLC_83XX_IDC_DEV_PARTITION_INFO_2 */ 0x37F0, /* QLC_83XX_DRV_OP_MODE */ 0x37F4, /* QLC_83XX_VNIC_STATE */ 0x3868, /* QLC_83XX_DRV_LOCK */ 0x386C, /* QLC_83XX_DRV_UNLOCK */ 0x3504, /* QLC_83XX_DRV_LOCK_ID */ 0x34A4, /* QLC_83XX_ASIC_TEMP */ }; const u32 qlcnic_83xx_reg_tbl[] = { 0x34A8, /* PEG_HALT_STAT1 */ 0x34AC, /* PEG_HALT_STAT2 */ 0x34B0, /* FW_HEARTBEAT */ 0x3500, /* FLASH LOCK_ID */ 0x3528, /* FW_CAPABILITIES */ 0x3538, /* Driver active, DRV_REG0 */ 0x3540, /* Device state, DRV_REG1 */ 0x3544, /* Driver state, DRV_REG2 */ 0x3548, /* Driver scratch, DRV_REG3 */ 0x354C, /* Device partition info, DRV_REG4 */ 0x3524, /* Driver IDC ver, DRV_REG5 */ 0x3550, /* FW_VER_MAJOR */ 0x3554, /* FW_VER_MINOR */ 0x3558, /* FW_VER_SUB */ 0x359C, /* NPAR STATE */ 0x35FC, /* FW_IMG_VALID */ 0x3650, /* CMD_PEG_STATE */ 0x373C, /* RCV_PEG_STATE */ 0x37B4, /* ASIC TEMP */ 0x356C, /* FW API */ 0x3570, /* DRV OP MODE */ 0x3850, /* FLASH LOCK */ 0x3854, /* FLASH UNLOCK */ }; static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = { .read_crb = qlcnic_83xx_read_crb, .write_crb = qlcnic_83xx_write_crb, .read_reg = qlcnic_83xx_rd_reg_indirect, .write_reg = qlcnic_83xx_wrt_reg_indirect, .get_mac_address = qlcnic_83xx_get_mac_address, .setup_intr = qlcnic_83xx_setup_intr, .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args, .mbx_cmd = qlcnic_83xx_issue_cmd, .get_func_no = qlcnic_83xx_get_func_no, .api_lock = qlcnic_83xx_cam_lock, .api_unlock = 
qlcnic_83xx_cam_unlock, .add_sysfs = qlcnic_83xx_add_sysfs, .remove_sysfs = qlcnic_83xx_remove_sysfs, .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag, .create_rx_ctx = qlcnic_83xx_create_rx_ctx, .create_tx_ctx = qlcnic_83xx_create_tx_ctx, .del_rx_ctx = qlcnic_83xx_del_rx_ctx, .del_tx_ctx = qlcnic_83xx_del_tx_ctx, .setup_link_event = qlcnic_83xx_setup_link_event, .get_nic_info = qlcnic_83xx_get_nic_info, .get_pci_info = qlcnic_83xx_get_pci_info, .set_nic_info = qlcnic_83xx_set_nic_info, .change_macvlan = qlcnic_83xx_sre_macaddr_change, .napi_enable = qlcnic_83xx_napi_enable, .napi_disable = qlcnic_83xx_napi_disable, .config_intr_coal = qlcnic_83xx_config_intr_coal, .config_rss = qlcnic_83xx_config_rss, .config_hw_lro = qlcnic_83xx_config_hw_lro, .config_promisc_mode = qlcnic_83xx_nic_set_promisc, .change_l2_filter = qlcnic_83xx_change_l2_filter, .get_board_info = qlcnic_83xx_get_port_info, .set_mac_filter_count = qlcnic_83xx_set_mac_filter_count, .free_mac_list = qlcnic_82xx_free_mac_list, .io_error_detected = qlcnic_83xx_io_error_detected, .io_slot_reset = qlcnic_83xx_io_slot_reset, .io_resume = qlcnic_83xx_io_resume, .get_beacon_state = qlcnic_83xx_get_beacon_state, .enable_sds_intr = qlcnic_83xx_enable_sds_intr, .disable_sds_intr = qlcnic_83xx_disable_sds_intr, .enable_tx_intr = qlcnic_83xx_enable_tx_intr, .disable_tx_intr = qlcnic_83xx_disable_tx_intr, .get_saved_state = qlcnic_83xx_get_saved_state, .set_saved_state = qlcnic_83xx_set_saved_state, .cache_tmpl_hdr_values = qlcnic_83xx_cache_tmpl_hdr_values, .get_cap_size = qlcnic_83xx_get_cap_size, .set_sys_info = qlcnic_83xx_set_sys_info, .store_cap_mask = qlcnic_83xx_store_cap_mask, .encap_rx_offload = qlcnic_83xx_encap_rx_offload, .encap_tx_offload = qlcnic_83xx_encap_tx_offload, }; static struct qlcnic_nic_template qlcnic_83xx_ops = { .config_bridged_mode = qlcnic_config_bridged_mode, .config_led = qlcnic_config_led, .request_reset = qlcnic_83xx_idc_request_reset, .cancel_idc_work = qlcnic_83xx_idc_exit, .napi_add = qlcnic_83xx_napi_add, .napi_del = qlcnic_83xx_napi_del, .config_ipaddr = qlcnic_83xx_config_ipaddr, .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr, .shutdown = qlcnic_83xx_shutdown, .resume = qlcnic_83xx_resume, }; void qlcnic_83xx_register_map(struct qlcnic_hardware_context *ahw) { ahw->hw_ops = &qlcnic_83xx_hw_ops; ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl; ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl; } int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *adapter) { u32 fw_major, fw_minor, fw_build; struct pci_dev *pdev = adapter->pdev; fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR); fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR); fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB); adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build); dev_info(&pdev->dev, "Driver v%s, firmware version %d.%d.%d\n", QLCNIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build); return adapter->fw_version; } static int __qlcnic_set_win_base(struct qlcnic_adapter *adapter, u32 addr) { void __iomem *base; u32 val; base = adapter->ahw->pci_base0 + QLC_83XX_CRB_WIN_FUNC(adapter->ahw->pci_func); writel(addr, base); val = readl(base); if (val != addr) return -EIO; return 0; } int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr, int *err) { struct qlcnic_hardware_context *ahw = adapter->ahw; *err = __qlcnic_set_win_base(adapter, (u32) addr); if (!*err) { return QLCRDX(ahw, QLCNIC_WILDCARD); } else { dev_err(&adapter->pdev->dev, "%s failed, 
addr = 0x%lx\n", __func__, addr); return -EIO; } } int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr, u32 data) { int err; struct qlcnic_hardware_context *ahw = adapter->ahw; err = __qlcnic_set_win_base(adapter, (u32) addr); if (!err) { QLCWRX(ahw, QLCNIC_WILDCARD, data); return 0; } else { dev_err(&adapter->pdev->dev, "%s failed, addr = 0x%x data = 0x%x\n", __func__, (int)addr, data); return err; } } static void qlcnic_83xx_enable_legacy(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; /* MSI-X enablement failed, use legacy interrupt */ adapter->tgt_status_reg = ahw->pci_base0 + QLC_83XX_INTX_PTR; adapter->tgt_mask_reg = ahw->pci_base0 + QLC_83XX_INTX_MASK; adapter->isr_int_vec = ahw->pci_base0 + QLC_83XX_INTX_TRGR; adapter->msix_entries[0].vector = adapter->pdev->irq; dev_info(&adapter->pdev->dev, "using legacy interrupt\n"); } static int qlcnic_83xx_calculate_msix_vector(struct qlcnic_adapter *adapter) { int num_msix; num_msix = adapter->drv_sds_rings; /* account for AEN interrupt MSI-X based interrupts */ num_msix += 1; if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) num_msix += adapter->drv_tx_rings; return num_msix; } int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; int err, i, num_msix; if (adapter->flags & QLCNIC_TSS_RSS) { err = qlcnic_setup_tss_rss_intr(adapter); if (err < 0) return err; num_msix = ahw->num_msix; } else { num_msix = qlcnic_83xx_calculate_msix_vector(adapter); err = qlcnic_enable_msix(adapter, num_msix); if (err == -ENOMEM) return err; if (adapter->flags & QLCNIC_MSIX_ENABLED) { num_msix = ahw->num_msix; } else { if (qlcnic_sriov_vf_check(adapter)) return -EINVAL; num_msix = 1; adapter->drv_sds_rings = QLCNIC_SINGLE_RING; adapter->drv_tx_rings = QLCNIC_SINGLE_RING; } } /* setup interrupt mapping table for fw */ ahw->intr_tbl = vzalloc(array_size(num_msix, sizeof(struct qlcnic_intrpt_config))); if (!ahw->intr_tbl) return -ENOMEM; if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { if (adapter->ahw->pci_func >= QLC_MAX_LEGACY_FUNC_SUPP) { dev_err(&adapter->pdev->dev, "PCI function number 8 and higher are not supported with legacy interrupt, func 0x%x\n", ahw->pci_func); return -EOPNOTSUPP; } qlcnic_83xx_enable_legacy(adapter); } for (i = 0; i < num_msix; i++) { if (adapter->flags & QLCNIC_MSIX_ENABLED) ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX; else ahw->intr_tbl[i].type = QLCNIC_INTRPT_INTX; ahw->intr_tbl[i].id = i; ahw->intr_tbl[i].src = 0; } return 0; } static inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter) { writel(0, adapter->tgt_mask_reg); } static inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter) { if (adapter->tgt_mask_reg) writel(1, adapter->tgt_mask_reg); } static inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter *adapter) { u32 mask; /* Mailbox in MSI-x mode and Legacy Interrupt share the same * source register. 
We could be here before contexts are created * and sds_ring->crb_intr_mask has not been initialized, calculate * BAR offset for Interrupt Source Register */ mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK); writel(0, adapter->ahw->pci_base0 + mask); } void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *adapter) { u32 mask; mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK); writel(1, adapter->ahw->pci_base0 + mask); QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, 0); } static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { int i; if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP) return; for (i = 0; i < cmd->rsp.num; i++) cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i)); } irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter) { u32 intr_val; struct qlcnic_hardware_context *ahw = adapter->ahw; int retries = 0; intr_val = readl(adapter->tgt_status_reg); if (!QLC_83XX_VALID_INTX_BIT31(intr_val)) return IRQ_NONE; if (QLC_83XX_INTX_FUNC(intr_val) != adapter->ahw->pci_func) { adapter->stats.spurious_intr++; return IRQ_NONE; } /* The barrier is required to ensure writes to the registers */ wmb(); /* clear the interrupt trigger control register */ writel_relaxed(0, adapter->isr_int_vec); intr_val = readl(adapter->isr_int_vec); do { intr_val = readl(adapter->tgt_status_reg); if (QLC_83XX_INTX_FUNC(intr_val) != ahw->pci_func) break; retries++; } while (QLC_83XX_VALID_INTX_BIT30(intr_val) && (retries < QLC_83XX_LEGACY_INTX_MAX_RETRY)); return IRQ_HANDLED; } static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx) { mbx->rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED; complete(&mbx->completion); } static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter) { u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED; struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; unsigned long flags; spin_lock_irqsave(&mbx->aen_lock, flags); resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL); if (!(resp & QLCNIC_SET_OWNER)) goto out; event = readl(QLCNIC_MBX_FW(adapter->ahw, 0)); if (event & QLCNIC_MBX_ASYNC_EVENT) { __qlcnic_83xx_process_aen(adapter); } else { if (mbx->rsp_status != rsp_status) qlcnic_83xx_notify_mbx_response(mbx); } out: qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); spin_unlock_irqrestore(&mbx->aen_lock, flags); } irqreturn_t qlcnic_83xx_intr(int irq, void *data) { struct qlcnic_adapter *adapter = data; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_hardware_context *ahw = adapter->ahw; if (qlcnic_83xx_clear_legacy_intr(adapter) == IRQ_NONE) return IRQ_NONE; qlcnic_83xx_poll_process_aen(adapter); if (ahw->diag_test) { if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) ahw->diag_cnt++; qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); return IRQ_HANDLED; } if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); } else { sds_ring = &adapter->recv_ctx->sds_rings[0]; napi_schedule(&sds_ring->napi); } return IRQ_HANDLED; } irqreturn_t qlcnic_83xx_tmp_intr(int irq, void *data) { struct qlcnic_host_sds_ring *sds_ring = data; struct qlcnic_adapter *adapter = sds_ring->adapter; if (adapter->flags & QLCNIC_MSIX_ENABLED) goto done; if (adapter->nic_ops->clear_legacy_intr(adapter) == IRQ_NONE) return IRQ_NONE; done: adapter->ahw->diag_cnt++; qlcnic_enable_sds_intr(adapter, sds_ring); return IRQ_HANDLED; } void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter) { u32 num_msix; if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) 
qlcnic_83xx_set_legacy_intr_mask(adapter); qlcnic_83xx_disable_mbx_intr(adapter); if (adapter->flags & QLCNIC_MSIX_ENABLED) num_msix = adapter->ahw->num_msix - 1; else num_msix = 0; msleep(20); if (adapter->msix_entries) { synchronize_irq(adapter->msix_entries[num_msix].vector); free_irq(adapter->msix_entries[num_msix].vector, adapter); } } int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter) { irq_handler_t handler; u32 val; int err = 0; unsigned long flags = 0; if (!(adapter->flags & QLCNIC_MSI_ENABLED) && !(adapter->flags & QLCNIC_MSIX_ENABLED)) flags |= IRQF_SHARED; if (adapter->flags & QLCNIC_MSIX_ENABLED) { handler = qlcnic_83xx_handle_aen; val = adapter->msix_entries[adapter->ahw->num_msix - 1].vector; err = request_irq(val, handler, flags, "qlcnic-MB", adapter); if (err) { dev_err(&adapter->pdev->dev, "failed to register MBX interrupt\n"); return err; } } else { handler = qlcnic_83xx_intr; val = adapter->msix_entries[0].vector; err = request_irq(val, handler, flags, "qlcnic", adapter); if (err) { dev_err(&adapter->pdev->dev, "failed to register INTx interrupt\n"); return err; } qlcnic_83xx_clear_legacy_intr_mask(adapter); } /* Enable mailbox interrupt */ qlcnic_83xx_enable_mbx_interrupt(adapter); return err; } void qlcnic_83xx_get_func_no(struct qlcnic_adapter *adapter) { u32 val = QLCRDX(adapter->ahw, QLCNIC_INFORMANT); adapter->ahw->pci_func = (val >> 24) & 0xff; } int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter) { void __iomem *addr; u32 val, limit = 0; struct qlcnic_hardware_context *ahw = adapter->ahw; addr = ahw->pci_base0 + QLC_83XX_SEM_LOCK_FUNC(ahw->pci_func); do { val = readl(addr); if (val) { /* write the function number to register */ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, ahw->pci_func); return 0; } usleep_range(1000, 2000); } while (++limit <= QLCNIC_PCIE_SEM_TIMEOUT); return -EIO; } void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *adapter) { void __iomem *addr; struct qlcnic_hardware_context *ahw = adapter->ahw; addr = ahw->pci_base0 + QLC_83XX_SEM_UNLOCK_FUNC(ahw->pci_func); readl(addr); } void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf, loff_t offset, size_t size) { int ret = 0; u32 data; if (qlcnic_api_lock(adapter)) { dev_err(&adapter->pdev->dev, "%s: failed to acquire lock. addr offset 0x%x\n", __func__, (u32)offset); return; } data = QLCRD32(adapter, (u32) offset, &ret); qlcnic_api_unlock(adapter); if (ret == -EIO) { dev_err(&adapter->pdev->dev, "%s: failed. 
addr offset 0x%x\n", __func__, (u32)offset); return; } memcpy(buf, &data, size); } void qlcnic_83xx_write_crb(struct qlcnic_adapter *adapter, char *buf, loff_t offset, size_t size) { u32 data; memcpy(&data, buf, size); qlcnic_83xx_wrt_reg_indirect(adapter, (u32) offset, data); } int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; int status; status = qlcnic_83xx_get_port_config(adapter); if (status) { dev_err(&adapter->pdev->dev, "Get Port Info failed\n"); } else { if (ahw->port_config & QLC_83XX_10G_CAPABLE) { ahw->port_type = QLCNIC_XGBE; } else if (ahw->port_config & QLC_83XX_10_CAPABLE || ahw->port_config & QLC_83XX_100_CAPABLE || ahw->port_config & QLC_83XX_1G_CAPABLE) { ahw->port_type = QLCNIC_GBE; } else { ahw->port_type = QLCNIC_XGBE; } if (QLC_83XX_AUTONEG(ahw->port_config)) ahw->link_autoneg = AUTONEG_ENABLE; } return status; } static void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; u16 act_pci_fn = ahw->total_nic_func; u16 count; ahw->max_mc_count = QLC_83XX_MAX_MC_COUNT; if (act_pci_fn <= 2) count = (QLC_83XX_MAX_UC_COUNT - QLC_83XX_MAX_MC_COUNT) / act_pci_fn; else count = (QLC_83XX_LB_MAX_FILTERS - QLC_83XX_MAX_MC_COUNT) / act_pci_fn; ahw->max_uc_count = count; } void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *adapter) { u32 val; if (adapter->flags & QLCNIC_MSIX_ENABLED) val = BIT_2 | ((adapter->ahw->num_msix - 1) << 8); else val = BIT_2; QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, val); qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); } void qlcnic_83xx_check_vf(struct qlcnic_adapter *adapter, const struct pci_device_id *ent) { u32 op_mode, priv_level; struct qlcnic_hardware_context *ahw = adapter->ahw; ahw->fw_hal_version = 2; qlcnic_get_func_no(adapter); if (qlcnic_sriov_vf_check(adapter)) { qlcnic_sriov_vf_set_ops(adapter); return; } /* Determine function privilege level */ op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE); if (op_mode == QLC_83XX_DEFAULT_OPMODE) priv_level = QLCNIC_MGMT_FUNC; else priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode, ahw->pci_func); if (priv_level == QLCNIC_NON_PRIV_FUNC) { ahw->op_mode = QLCNIC_NON_PRIV_FUNC; dev_info(&adapter->pdev->dev, "HAL Version: %d Non Privileged function\n", ahw->fw_hal_version); adapter->nic_ops = &qlcnic_vf_ops; } else { if (pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV)) set_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state); adapter->nic_ops = &qlcnic_83xx_ops; } } static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter, u32 data[]); static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter, u32 data[]); void qlcnic_dump_mbx(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { int i; if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP) return; dev_info(&adapter->pdev->dev, "Host MBX regs(%d)\n", cmd->req.num); for (i = 0; i < cmd->req.num; i++) { if (i && !(i % 8)) pr_info("\n"); pr_info("%08x ", cmd->req.arg[i]); } pr_info("\n"); dev_info(&adapter->pdev->dev, "FW MBX regs(%d)\n", cmd->rsp.num); for (i = 0; i < cmd->rsp.num; i++) { if (i && !(i % 8)) pr_info("\n"); pr_info("%08x ", cmd->rsp.arg[i]); } pr_info("\n"); } static void qlcnic_83xx_poll_for_mbx_completion(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { struct qlcnic_hardware_context *ahw = adapter->ahw; int opcode = LSW(cmd->req.arg[0]); unsigned long max_loops; max_loops = cmd->total_cmds * QLC_83XX_MBX_CMD_LOOP; for (; max_loops; max_loops--) 
{ if (atomic_read(&cmd->rsp_status) == QLC_83XX_MBX_RESPONSE_ARRIVED) return; udelay(1); } dev_err(&adapter->pdev->dev, "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", __func__, opcode, cmd->type, ahw->pci_func, ahw->op_mode); flush_workqueue(ahw->mailbox->work_q); return; } int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; struct qlcnic_hardware_context *ahw = adapter->ahw; int cmd_type, err, opcode; unsigned long timeout; if (!mbx) return -EIO; opcode = LSW(cmd->req.arg[0]); cmd_type = cmd->type; err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout); if (err) { dev_err(&adapter->pdev->dev, "%s: Mailbox not available, cmd_op=0x%x, cmd_context=0x%x, pci_func=0x%x, op_mode=0x%x\n", __func__, opcode, cmd->type, ahw->pci_func, ahw->op_mode); return err; } switch (cmd_type) { case QLC_83XX_MBX_CMD_WAIT: if (!wait_for_completion_timeout(&cmd->completion, timeout)) { dev_err(&adapter->pdev->dev, "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", __func__, opcode, cmd_type, ahw->pci_func, ahw->op_mode); flush_workqueue(mbx->work_q); } break; case QLC_83XX_MBX_CMD_NO_WAIT: return 0; case QLC_83XX_MBX_CMD_BUSY_WAIT: qlcnic_83xx_poll_for_mbx_completion(adapter, cmd); break; default: dev_err(&adapter->pdev->dev, "%s: Invalid mailbox command, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", __func__, opcode, cmd_type, ahw->pci_func, ahw->op_mode); qlcnic_83xx_detach_mailbox_work(adapter); } return cmd->rsp_opcode; } int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx, struct qlcnic_adapter *adapter, u32 type) { int i, size; u32 temp; const struct qlcnic_mailbox_metadata *mbx_tbl; memset(mbx, 0, sizeof(struct qlcnic_cmd_args)); mbx_tbl = qlcnic_83xx_mbx_tbl; size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl); for (i = 0; i < size; i++) { if (type == mbx_tbl[i].cmd) { mbx->op_type = QLC_83XX_FW_MBX_CMD; mbx->req.num = mbx_tbl[i].in_args; mbx->rsp.num = mbx_tbl[i].out_args; mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32), GFP_ATOMIC); if (!mbx->req.arg) return -ENOMEM; mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32), GFP_ATOMIC); if (!mbx->rsp.arg) { kfree(mbx->req.arg); mbx->req.arg = NULL; return -ENOMEM; } temp = adapter->ahw->fw_hal_version << 29; mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp); mbx->cmd_op = type; return 0; } } dev_err(&adapter->pdev->dev, "%s: Invalid mailbox command opcode 0x%x\n", __func__, type); return -EINVAL; } void qlcnic_83xx_idc_aen_work(struct work_struct *work) { struct qlcnic_adapter *adapter; struct qlcnic_cmd_args cmd; int i, err = 0; adapter = container_of(work, struct qlcnic_adapter, idc_aen_work.work); err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_IDC_ACK); if (err) return; for (i = 1; i < QLC_83XX_MBX_AEN_CNT; i++) cmd.req.arg[i] = adapter->ahw->mbox_aen[i]; err = qlcnic_issue_cmd(adapter, &cmd); if (err) dev_info(&adapter->pdev->dev, "%s: Mailbox IDC ACK failed.\n", __func__); qlcnic_free_mbx_args(&cmd); } static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter, u32 data[]) { dev_dbg(&adapter->pdev->dev, "Completion AEN:0x%x.\n", QLCNIC_MBX_RSP(data[0])); clear_bit(QLC_83XX_IDC_COMP_AEN, &adapter->ahw->idc.status); return; } static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; u32 event[QLC_83XX_MBX_AEN_CNT]; int i; for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++) event[i] = 
readl(QLCNIC_MBX_FW(ahw, i)); switch (QLCNIC_MBX_RSP(event[0])) { case QLCNIC_MBX_LINK_EVENT: qlcnic_83xx_handle_link_aen(adapter, event); break; case QLCNIC_MBX_COMP_EVENT: qlcnic_83xx_handle_idc_comp_aen(adapter, event); break; case QLCNIC_MBX_REQUEST_EVENT: for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++) adapter->ahw->mbox_aen[i] = QLCNIC_MBX_RSP(event[i]); queue_delayed_work(adapter->qlcnic_wq, &adapter->idc_aen_work, 0); break; case QLCNIC_MBX_TIME_EXTEND_EVENT: ahw->extend_lb_time = event[1] >> 8 & 0xf; break; case QLCNIC_MBX_BC_EVENT: qlcnic_sriov_handle_bc_event(adapter, event[1]); break; case QLCNIC_MBX_SFP_INSERT_EVENT: dev_info(&adapter->pdev->dev, "SFP+ Insert AEN:0x%x.\n", QLCNIC_MBX_RSP(event[0])); break; case QLCNIC_MBX_SFP_REMOVE_EVENT: dev_info(&adapter->pdev->dev, "SFP Removed AEN:0x%x.\n", QLCNIC_MBX_RSP(event[0])); break; case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT: qlcnic_dcb_aen_handler(adapter->dcb, (void *)&event[1]); break; default: dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n", QLCNIC_MBX_RSP(event[0])); break; } QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER); } static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter) { u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED; struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_mailbox *mbx = ahw->mailbox; unsigned long flags; spin_lock_irqsave(&mbx->aen_lock, flags); resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL); if (resp & QLCNIC_SET_OWNER) { event = readl(QLCNIC_MBX_FW(ahw, 0)); if (event & QLCNIC_MBX_ASYNC_EVENT) { __qlcnic_83xx_process_aen(adapter); } else { if (mbx->rsp_status != rsp_status) qlcnic_83xx_notify_mbx_response(mbx); } } spin_unlock_irqrestore(&mbx->aen_lock, flags); } static void qlcnic_83xx_mbx_poll_work(struct work_struct *work) { struct qlcnic_adapter *adapter; adapter = container_of(work, struct qlcnic_adapter, mbx_poll_work.work); if (!test_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state)) return; qlcnic_83xx_process_aen(adapter); queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work, (HZ / 10)); } void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *adapter) { if (test_and_set_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state)) return; INIT_DELAYED_WORK(&adapter->mbx_poll_work, qlcnic_83xx_mbx_poll_work); queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work, 0); } void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *adapter) { if (!test_and_clear_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state)) return; cancel_delayed_work_sync(&adapter->mbx_poll_work); } static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter) { int index, i, err, sds_mbx_size; u32 *buf, intrpt_id, intr_mask; u16 context_id; u8 num_sds; struct qlcnic_cmd_args cmd; struct qlcnic_host_sds_ring *sds; struct qlcnic_sds_mbx sds_mbx; struct qlcnic_add_rings_mbx_out *mbx_out; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct qlcnic_hardware_context *ahw = adapter->ahw; sds_mbx_size = sizeof(struct qlcnic_sds_mbx); context_id = recv_ctx->context_id; num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS; err = ahw->hw_ops->alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_ADD_RCV_RINGS); if (err) { dev_err(&adapter->pdev->dev, "Failed to alloc mbx args %d\n", err); return err; } cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16); /* set up status rings, mbx 2-81 */ index = 2; for (i = 8; i < adapter->drv_sds_rings; i++) { memset(&sds_mbx, 0, sds_mbx_size); sds = &recv_ctx->sds_rings[i]; sds->consumer = 0; memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds)); 
sds_mbx.phy_addr_low = LSD(sds->phys_addr); sds_mbx.phy_addr_high = MSD(sds->phys_addr); sds_mbx.sds_ring_size = sds->num_desc; if (adapter->flags & QLCNIC_MSIX_ENABLED) intrpt_id = ahw->intr_tbl[i].id; else intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID); if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) sds_mbx.intrpt_id = intrpt_id; else sds_mbx.intrpt_id = 0xffff; sds_mbx.intrpt_val = 0; buf = &cmd.req.arg[index]; memcpy(buf, &sds_mbx, sds_mbx_size); index += sds_mbx_size / sizeof(u32); } /* send the mailbox command */ err = ahw->hw_ops->mbx_cmd(adapter, &cmd); if (err) { dev_err(&adapter->pdev->dev, "Failed to add rings %d\n", err); goto out; } mbx_out = (struct qlcnic_add_rings_mbx_out *)&cmd.rsp.arg[1]; index = 0; /* status descriptor ring */ for (i = 8; i < adapter->drv_sds_rings; i++) { sds = &recv_ctx->sds_rings[i]; sds->crb_sts_consumer = ahw->pci_base0 + mbx_out->host_csmr[index]; if (adapter->flags & QLCNIC_MSIX_ENABLED) intr_mask = ahw->intr_tbl[i].src; else intr_mask = QLCRDX(ahw, QLCNIC_DEF_INT_MASK); sds->crb_intr_mask = ahw->pci_base0 + intr_mask; index++; } out: qlcnic_free_mbx_args(&cmd); return err; } void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *adapter) { int err; u32 temp = 0; struct qlcnic_cmd_args cmd; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX)) return; if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) cmd.req.arg[0] |= (0x3 << 29); if (qlcnic_sriov_pf_check(adapter)) qlcnic_pf_set_interface_id_del_rx_ctx(adapter, &temp); cmd.req.arg[1] = recv_ctx->context_id | temp; err = qlcnic_issue_cmd(adapter, &cmd); if (err) dev_err(&adapter->pdev->dev, "Failed to destroy rx ctx in firmware\n"); recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED; qlcnic_free_mbx_args(&cmd); } int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter) { int i, err, index, sds_mbx_size, rds_mbx_size; u8 num_sds, num_rds; u32 *buf, intrpt_id, intr_mask, cap = 0; struct qlcnic_host_sds_ring *sds; struct qlcnic_host_rds_ring *rds; struct qlcnic_sds_mbx sds_mbx; struct qlcnic_rds_mbx rds_mbx; struct qlcnic_cmd_args cmd; struct qlcnic_rcv_mbx_out *mbx_out; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; struct qlcnic_hardware_context *ahw = adapter->ahw; num_rds = adapter->max_rds_rings; if (adapter->drv_sds_rings <= QLCNIC_MAX_SDS_RINGS) num_sds = adapter->drv_sds_rings; else num_sds = QLCNIC_MAX_SDS_RINGS; sds_mbx_size = sizeof(struct qlcnic_sds_mbx); rds_mbx_size = sizeof(struct qlcnic_rds_mbx); cap = QLCNIC_CAP0_LEGACY_CONTEXT; if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) cap |= QLC_83XX_FW_CAP_LRO_MSS; /* set mailbox hdr and capabilities */ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX); if (err) return err; if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) cmd.req.arg[0] |= (0x3 << 29); cmd.req.arg[1] = cap; cmd.req.arg[5] = 1 | (num_rds << 5) | (num_sds << 8) | (QLC_83XX_HOST_RDS_MODE_UNIQUE << 16); if (qlcnic_sriov_pf_check(adapter)) qlcnic_pf_set_interface_id_create_rx_ctx(adapter, &cmd.req.arg[6]); /* set up status rings, mbx 8-57/87 */ index = QLC_83XX_HOST_SDS_MBX_IDX; for (i = 0; i < num_sds; i++) { memset(&sds_mbx, 0, sds_mbx_size); sds = &recv_ctx->sds_rings[i]; sds->consumer = 0; memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds)); sds_mbx.phy_addr_low = LSD(sds->phys_addr); sds_mbx.phy_addr_high = MSD(sds->phys_addr); sds_mbx.sds_ring_size = sds->num_desc; if (adapter->flags & QLCNIC_MSIX_ENABLED) intrpt_id = 
ahw->intr_tbl[i].id; else intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID); if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) sds_mbx.intrpt_id = intrpt_id; else sds_mbx.intrpt_id = 0xffff; sds_mbx.intrpt_val = 0; buf = &cmd.req.arg[index]; memcpy(buf, &sds_mbx, sds_mbx_size); index += sds_mbx_size / sizeof(u32); } /* set up receive rings, mbx 88-111/135 */ index = QLCNIC_HOST_RDS_MBX_IDX; rds = &recv_ctx->rds_rings[0]; rds->producer = 0; memset(&rds_mbx, 0, rds_mbx_size); rds_mbx.phy_addr_reg_low = LSD(rds->phys_addr); rds_mbx.phy_addr_reg_high = MSD(rds->phys_addr); rds_mbx.reg_ring_sz = rds->dma_size; rds_mbx.reg_ring_len = rds->num_desc; /* Jumbo ring */ rds = &recv_ctx->rds_rings[1]; rds->producer = 0; rds_mbx.phy_addr_jmb_low = LSD(rds->phys_addr); rds_mbx.phy_addr_jmb_high = MSD(rds->phys_addr); rds_mbx.jmb_ring_sz = rds->dma_size; rds_mbx.jmb_ring_len = rds->num_desc; buf = &cmd.req.arg[index]; memcpy(buf, &rds_mbx, rds_mbx_size); /* send the mailbox command */ err = ahw->hw_ops->mbx_cmd(adapter, &cmd); if (err) { dev_err(&adapter->pdev->dev, "Failed to create Rx ctx in firmware%d\n", err); goto out; } mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd.rsp.arg[1]; recv_ctx->context_id = mbx_out->ctx_id; recv_ctx->state = mbx_out->state; recv_ctx->virt_port = mbx_out->vport_id; dev_info(&adapter->pdev->dev, "Rx Context[%d] Created, state:0x%x\n", recv_ctx->context_id, recv_ctx->state); /* Receive descriptor ring */ /* Standard ring */ rds = &recv_ctx->rds_rings[0]; rds->crb_rcv_producer = ahw->pci_base0 + mbx_out->host_prod[0].reg_buf; /* Jumbo ring */ rds = &recv_ctx->rds_rings[1]; rds->crb_rcv_producer = ahw->pci_base0 + mbx_out->host_prod[0].jmb_buf; /* status descriptor ring */ for (i = 0; i < num_sds; i++) { sds = &recv_ctx->sds_rings[i]; sds->crb_sts_consumer = ahw->pci_base0 + mbx_out->host_csmr[i]; if (adapter->flags & QLCNIC_MSIX_ENABLED) intr_mask = ahw->intr_tbl[i].src; else intr_mask = QLCRDX(ahw, QLCNIC_DEF_INT_MASK); sds->crb_intr_mask = ahw->pci_base0 + intr_mask; } if (adapter->drv_sds_rings > QLCNIC_MAX_SDS_RINGS) err = qlcnic_83xx_add_rings(adapter); out: qlcnic_free_mbx_args(&cmd); return err; } void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *adapter, struct qlcnic_host_tx_ring *tx_ring) { struct qlcnic_cmd_args cmd; u32 temp = 0; if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX)) return; if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) cmd.req.arg[0] |= (0x3 << 29); if (qlcnic_sriov_pf_check(adapter)) qlcnic_pf_set_interface_id_del_tx_ctx(adapter, &temp); cmd.req.arg[1] = tx_ring->ctx_id | temp; if (qlcnic_issue_cmd(adapter, &cmd)) dev_err(&adapter->pdev->dev, "Failed to destroy tx ctx in firmware\n"); qlcnic_free_mbx_args(&cmd); } int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter, struct qlcnic_host_tx_ring *tx, int ring) { int err; u16 msix_id; u32 *buf, intr_mask, temp = 0; struct qlcnic_cmd_args cmd; struct qlcnic_tx_mbx mbx; struct qlcnic_tx_mbx_out *mbx_out; struct qlcnic_hardware_context *ahw = adapter->ahw; u32 msix_vector; /* Reset host resources */ tx->producer = 0; tx->sw_consumer = 0; *(tx->hw_consumer) = 0; memset(&mbx, 0, sizeof(struct qlcnic_tx_mbx)); /* setup mailbox inbox registerss */ mbx.phys_addr_low = LSD(tx->phys_addr); mbx.phys_addr_high = MSD(tx->phys_addr); mbx.cnsmr_index_low = LSD(tx->hw_cons_phys_addr); mbx.cnsmr_index_high = MSD(tx->hw_cons_phys_addr); mbx.size = tx->num_desc; if (adapter->flags & QLCNIC_MSIX_ENABLED) { if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) msix_vector = 
adapter->drv_sds_rings + ring; else msix_vector = adapter->drv_sds_rings - 1; msix_id = ahw->intr_tbl[msix_vector].id; } else { msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID); } if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) mbx.intr_id = msix_id; else mbx.intr_id = 0xffff; mbx.src = 0; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX); if (err) return err; if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) cmd.req.arg[0] |= (0x3 << 29); if (qlcnic_sriov_pf_check(adapter)) qlcnic_pf_set_interface_id_create_tx_ctx(adapter, &temp); cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT; cmd.req.arg[5] = QLCNIC_SINGLE_RING | temp; buf = &cmd.req.arg[6]; memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx)); /* send the mailbox command*/ err = qlcnic_issue_cmd(adapter, &cmd); if (err) { netdev_err(adapter->netdev, "Failed to create Tx ctx in firmware 0x%x\n", err); goto out; } mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2]; tx->crb_cmd_producer = ahw->pci_base0 + mbx_out->host_prod; tx->ctx_id = mbx_out->ctx_id; if ((adapter->flags & QLCNIC_MSIX_ENABLED) && !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { intr_mask = ahw->intr_tbl[adapter->drv_sds_rings + ring].src; tx->crb_intr_mask = ahw->pci_base0 + intr_mask; } netdev_info(adapter->netdev, "Tx Context[0x%x] Created, state:0x%x\n", tx->ctx_id, mbx_out->state); out: qlcnic_free_mbx_args(&cmd); return err; } static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test, u8 num_sds_ring) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_host_rds_ring *rds_ring; u16 adapter_state = adapter->is_up; u8 ring; int ret; netif_device_detach(netdev); if (netif_running(netdev)) __qlcnic_down(adapter, netdev); qlcnic_detach(adapter); adapter->drv_sds_rings = QLCNIC_SINGLE_RING; adapter->ahw->diag_test = test; adapter->ahw->linkup = 0; ret = qlcnic_attach(adapter); if (ret) { netif_device_attach(netdev); return ret; } ret = qlcnic_fw_create_ctx(adapter); if (ret) { qlcnic_detach(adapter); if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) { adapter->drv_sds_rings = num_sds_ring; qlcnic_attach(adapter); } netif_device_attach(netdev); return ret; } for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &adapter->recv_ctx->rds_rings[ring]; qlcnic_post_rx_buffers(adapter, rds_ring, ring); } if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &adapter->recv_ctx->sds_rings[ring]; qlcnic_enable_sds_intr(adapter, sds_ring); } } if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { adapter->ahw->loopback_state = 0; adapter->ahw->hw_ops->setup_link_event(adapter, 1); } set_bit(__QLCNIC_DEV_UP, &adapter->state); return 0; } static void qlcnic_83xx_diag_free_res(struct net_device *netdev, u8 drv_sds_rings) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_host_sds_ring *sds_ring; int ring; clear_bit(__QLCNIC_DEV_UP, &adapter->state); if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &adapter->recv_ctx->sds_rings[ring]; if (adapter->flags & QLCNIC_MSIX_ENABLED) qlcnic_disable_sds_intr(adapter, sds_ring); } } qlcnic_fw_destroy_ctx(adapter); qlcnic_detach(adapter); adapter->ahw->diag_test = 0; adapter->drv_sds_rings = drv_sds_rings; if (qlcnic_attach(adapter)) goto out; if (netif_running(netdev)) __qlcnic_up(adapter, netdev); out: netif_device_attach(netdev); } static void 
qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_cmd_args cmd; u8 beacon_state; int err = 0; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_CONFIG); if (!err) { err = qlcnic_issue_cmd(adapter, &cmd); if (!err) { beacon_state = cmd.rsp.arg[4]; if (beacon_state == QLCNIC_BEACON_DISABLE) ahw->beacon_state = QLC_83XX_BEACON_OFF; else if (beacon_state == QLC_83XX_ENABLE_BEACON) ahw->beacon_state = QLC_83XX_BEACON_ON; } } else { netdev_err(adapter->netdev, "Get beacon state failed, err=%d\n", err); } qlcnic_free_mbx_args(&cmd); return; } int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state, u32 beacon) { struct qlcnic_cmd_args cmd; u32 mbx_in; int i, status = 0; if (state) { /* Get LED configuration */ status = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_CONFIG); if (status) return status; status = qlcnic_issue_cmd(adapter, &cmd); if (status) { dev_err(&adapter->pdev->dev, "Get led config failed.\n"); goto mbx_err; } else { for (i = 0; i < 4; i++) adapter->ahw->mbox_reg[i] = cmd.rsp.arg[i+1]; } qlcnic_free_mbx_args(&cmd); /* Set LED Configuration */ mbx_in = (LSW(QLC_83XX_LED_CONFIG) << 16) | LSW(QLC_83XX_LED_CONFIG); status = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_LED_CONFIG); if (status) return status; cmd.req.arg[1] = mbx_in; cmd.req.arg[2] = mbx_in; cmd.req.arg[3] = mbx_in; if (beacon) cmd.req.arg[4] = QLC_83XX_ENABLE_BEACON; status = qlcnic_issue_cmd(adapter, &cmd); if (status) { dev_err(&adapter->pdev->dev, "Set led config failed.\n"); } mbx_err: qlcnic_free_mbx_args(&cmd); return status; } else { /* Restoring default LED configuration */ status = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_LED_CONFIG); if (status) return status; cmd.req.arg[1] = adapter->ahw->mbox_reg[0]; cmd.req.arg[2] = adapter->ahw->mbox_reg[1]; cmd.req.arg[3] = adapter->ahw->mbox_reg[2]; if (beacon) cmd.req.arg[4] = adapter->ahw->mbox_reg[3]; status = qlcnic_issue_cmd(adapter, &cmd); if (status) dev_err(&adapter->pdev->dev, "Restoring led config failed.\n"); qlcnic_free_mbx_args(&cmd); return status; } } int qlcnic_83xx_set_led(struct net_device *netdev, enum ethtool_phys_id_state state) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int err = -EIO, active = 1; if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { netdev_warn(netdev, "LED test is not supported in non-privileged mode\n"); return -EOPNOTSUPP; } switch (state) { case ETHTOOL_ID_ACTIVE: if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) return -EBUSY; if (test_bit(__QLCNIC_RESETTING, &adapter->state)) break; err = qlcnic_83xx_config_led(adapter, active, 0); if (err) netdev_err(netdev, "Failed to set LED blink state\n"); break; case ETHTOOL_ID_INACTIVE: active = 0; if (test_bit(__QLCNIC_RESETTING, &adapter->state)) break; err = qlcnic_83xx_config_led(adapter, active, 0); if (err) netdev_err(netdev, "Failed to reset LED blink state\n"); break; default: return -EINVAL; } if (!active || err) clear_bit(__QLCNIC_LED_ENABLE, &adapter->state); return err; } void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *adapter, int enable) { struct qlcnic_cmd_args cmd; int status; if (qlcnic_sriov_vf_check(adapter)) return; if (enable) status = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INIT_NIC_FUNC); else status = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_STOP_NIC_FUNC); if (status) return; cmd.req.arg[1] = QLC_REGISTER_LB_IDC | QLC_INIT_FW_RESOURCES; if (adapter->dcb) cmd.req.arg[1] |= 
QLC_REGISTER_DCB_AEN; status = qlcnic_issue_cmd(adapter, &cmd); if (status) dev_err(&adapter->pdev->dev, "Failed to %s in NIC IDC function event.\n", (enable ? "register" : "unregister")); qlcnic_free_mbx_args(&cmd); } static int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter) { struct qlcnic_cmd_args cmd; int err; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORT_CONFIG); if (err) return err; cmd.req.arg[1] = adapter->ahw->port_config; err = qlcnic_issue_cmd(adapter, &cmd); if (err) dev_info(&adapter->pdev->dev, "Set Port Config failed.\n"); qlcnic_free_mbx_args(&cmd); return err; } static int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter) { struct qlcnic_cmd_args cmd; int err; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PORT_CONFIG); if (err) return err; err = qlcnic_issue_cmd(adapter, &cmd); if (err) dev_info(&adapter->pdev->dev, "Get Port config failed\n"); else adapter->ahw->port_config = cmd.rsp.arg[1]; qlcnic_free_mbx_args(&cmd); return err; } int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *adapter, int enable) { int err; u32 temp; struct qlcnic_cmd_args cmd; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_EVENT); if (err) return err; temp = adapter->recv_ctx->context_id << 16; cmd.req.arg[1] = (enable ? 1 : 0) | BIT_8 | temp; err = qlcnic_issue_cmd(adapter, &cmd); if (err) dev_info(&adapter->pdev->dev, "Setup linkevent mailbox failed\n"); qlcnic_free_mbx_args(&cmd); return err; } static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter, u32 *interface_id) { if (qlcnic_sriov_pf_check(adapter)) { qlcnic_alloc_lb_filters_mem(adapter); qlcnic_pf_set_interface_id_promisc(adapter, interface_id); adapter->rx_mac_learn = true; } else { if (!qlcnic_sriov_vf_check(adapter)) *interface_id = adapter->recv_ctx->context_id << 16; } } int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) { struct qlcnic_cmd_args *cmd = NULL; u32 temp = 0; int err; if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) return -EIO; cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); if (!cmd) return -ENOMEM; err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIGURE_MAC_RX_MODE); if (err) goto out; cmd->type = QLC_83XX_MBX_CMD_NO_WAIT; qlcnic_83xx_set_interface_id_promisc(adapter, &temp); if (qlcnic_84xx_check(adapter) && qlcnic_sriov_pf_check(adapter)) mode = VPORT_MISS_MODE_ACCEPT_ALL; cmd->req.arg[1] = mode | temp; err = qlcnic_issue_cmd(adapter, cmd); if (!err) return err; qlcnic_free_mbx_args(cmd); out: kfree(cmd); return err; } int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_hardware_context *ahw = adapter->ahw; u8 drv_sds_rings = adapter->drv_sds_rings; u8 drv_tx_rings = adapter->drv_tx_rings; int ret = 0, loop = 0; if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { netdev_warn(netdev, "Loopback test not supported in non privileged mode\n"); return -ENOTSUPP; } if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { netdev_info(netdev, "Device is resetting\n"); return -EBUSY; } if (qlcnic_get_diag_lock(adapter)) { netdev_info(netdev, "Device is in diagnostics mode\n"); return -EBUSY; } netdev_info(netdev, "%s loopback test in progress\n", mode == QLCNIC_ILB_MODE ? 
"internal" : "external"); ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST, drv_sds_rings); if (ret) goto fail_diag_alloc; ret = qlcnic_83xx_set_lb_mode(adapter, mode); if (ret) goto free_diag_res; /* Poll for link up event before running traffic */ do { msleep(QLC_83XX_LB_MSLEEP_COUNT); if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { netdev_info(netdev, "Device is resetting, free LB test resources\n"); ret = -EBUSY; goto free_diag_res; } if (loop++ > QLC_83XX_LB_WAIT_COUNT) { netdev_info(netdev, "Firmware didn't sent link up event to loopback request\n"); ret = -ETIMEDOUT; qlcnic_83xx_clear_lb_mode(adapter, mode); goto free_diag_res; } } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); ret = qlcnic_do_lb_test(adapter, mode); qlcnic_83xx_clear_lb_mode(adapter, mode); free_diag_res: qlcnic_83xx_diag_free_res(netdev, drv_sds_rings); fail_diag_alloc: adapter->drv_sds_rings = drv_sds_rings; adapter->drv_tx_rings = drv_tx_rings; qlcnic_release_diag_lock(adapter); return ret; } static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter, u32 *max_wait_count) { struct qlcnic_hardware_context *ahw = adapter->ahw; int temp; netdev_info(adapter->netdev, "Received loopback IDC time extend event for 0x%x seconds\n", ahw->extend_lb_time); temp = ahw->extend_lb_time * 1000; *max_wait_count += temp / QLC_83XX_LB_MSLEEP_COUNT; ahw->extend_lb_time = 0; } static int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct net_device *netdev = adapter->netdev; u32 config, max_wait_count; int status = 0, loop = 0; ahw->extend_lb_time = 0; max_wait_count = QLC_83XX_LB_WAIT_COUNT; status = qlcnic_83xx_get_port_config(adapter); if (status) return status; config = ahw->port_config; /* Check if port is already in loopback mode */ if ((config & QLC_83XX_CFG_LOOPBACK_HSS) || (config & QLC_83XX_CFG_LOOPBACK_EXT)) { netdev_err(netdev, "Port already in Loopback mode.\n"); return -EINPROGRESS; } set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); if (mode == QLCNIC_ILB_MODE) ahw->port_config |= QLC_83XX_CFG_LOOPBACK_HSS; if (mode == QLCNIC_ELB_MODE) ahw->port_config |= QLC_83XX_CFG_LOOPBACK_EXT; status = qlcnic_83xx_set_port_config(adapter); if (status) { netdev_err(netdev, "Failed to Set Loopback Mode = 0x%x.\n", ahw->port_config); ahw->port_config = config; clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); return status; } /* Wait for Link and IDC Completion AEN */ do { msleep(QLC_83XX_LB_MSLEEP_COUNT); if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { netdev_info(netdev, "Device is resetting, free LB test resources\n"); clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); return -EBUSY; } if (ahw->extend_lb_time) qlcnic_extend_lb_idc_cmpltn_wait(adapter, &max_wait_count); if (loop++ > max_wait_count) { netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n", __func__); clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); qlcnic_83xx_clear_lb_mode(adapter, mode); return -ETIMEDOUT; } } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status)); qlcnic_sre_macaddr_change(adapter, adapter->mac_addr, 0, QLCNIC_MAC_ADD); return status; } static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) { struct qlcnic_hardware_context *ahw = adapter->ahw; u32 config = ahw->port_config, max_wait_count; struct net_device *netdev = adapter->netdev; int status = 0, loop = 0; ahw->extend_lb_time = 0; max_wait_count = QLC_83XX_LB_WAIT_COUNT; set_bit(QLC_83XX_IDC_COMP_AEN, 
&ahw->idc.status); if (mode == QLCNIC_ILB_MODE) ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_HSS; if (mode == QLCNIC_ELB_MODE) ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_EXT; status = qlcnic_83xx_set_port_config(adapter); if (status) { netdev_err(netdev, "Failed to Clear Loopback Mode = 0x%x.\n", ahw->port_config); ahw->port_config = config; clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); return status; } /* Wait for Link and IDC Completion AEN */ do { msleep(QLC_83XX_LB_MSLEEP_COUNT); if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { netdev_info(netdev, "Device is resetting, free LB test resources\n"); clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); return -EBUSY; } if (ahw->extend_lb_time) qlcnic_extend_lb_idc_cmpltn_wait(adapter, &max_wait_count); if (loop++ > max_wait_count) { netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n", __func__); clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); return -ETIMEDOUT; } } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status)); qlcnic_sre_macaddr_change(adapter, adapter->mac_addr, 0, QLCNIC_MAC_DEL); return status; } static void qlcnic_83xx_set_interface_id_ipaddr(struct qlcnic_adapter *adapter, u32 *interface_id) { if (qlcnic_sriov_pf_check(adapter)) { qlcnic_pf_set_interface_id_ipaddr(adapter, interface_id); } else { if (!qlcnic_sriov_vf_check(adapter)) *interface_id = adapter->recv_ctx->context_id << 16; } } void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int mode) { int err; u32 temp = 0, temp_ip; struct qlcnic_cmd_args cmd; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_IP_ADDR); if (err) return; qlcnic_83xx_set_interface_id_ipaddr(adapter, &temp); if (mode == QLCNIC_IP_UP) cmd.req.arg[1] = 1 | temp; else cmd.req.arg[1] = 2 | temp; /* * Adapter needs IP address in network byte order. * But hardware mailbox registers go through writel(), hence IP address * gets swapped on big endian architecture. * To negate swapping of writel() on big endian architecture * use swab32(value). */ temp_ip = swab32(ntohl(ip)); memcpy(&cmd.req.arg[2], &temp_ip, sizeof(u32)); err = qlcnic_issue_cmd(adapter, &cmd); if (err != QLCNIC_RCODE_SUCCESS) dev_err(&adapter->netdev->dev, "could not notify %s IP 0x%x request\n", (mode == QLCNIC_IP_UP) ? "Add" : "Remove", ip); qlcnic_free_mbx_args(&cmd); } int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *adapter, int mode) { int err; u32 temp, arg1; struct qlcnic_cmd_args cmd; int lro_bit_mask; lro_bit_mask = (mode ? 
(BIT_0 | BIT_1 | BIT_2 | BIT_3) : 0); if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) return 0; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_HW_LRO); if (err) return err; temp = adapter->recv_ctx->context_id << 16; arg1 = lro_bit_mask | temp; cmd.req.arg[1] = arg1; err = qlcnic_issue_cmd(adapter, &cmd); if (err) dev_info(&adapter->pdev->dev, "LRO config failed\n"); qlcnic_free_mbx_args(&cmd); return err; } int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable) { int err; u32 word; struct qlcnic_cmd_args cmd; const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, 0x255b0ec26d5a56daULL }; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS); if (err) return err; /* * RSS request: * bits 3-0: Rsvd * 5-4: hash_type_ipv4 * 7-6: hash_type_ipv6 * 8: enable * 9: use indirection table * 16-31: indirection table mask */ word = ((u32)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) | ((u32)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) | ((u32)(enable & 0x1) << 8) | ((0x7ULL) << 16); cmd.req.arg[1] = (adapter->recv_ctx->context_id); cmd.req.arg[2] = word; memcpy(&cmd.req.arg[4], key, sizeof(key)); err = qlcnic_issue_cmd(adapter, &cmd); if (err) dev_info(&adapter->pdev->dev, "RSS config failed\n"); qlcnic_free_mbx_args(&cmd); return err; } static void qlcnic_83xx_set_interface_id_macaddr(struct qlcnic_adapter *adapter, u32 *interface_id) { if (qlcnic_sriov_pf_check(adapter)) { qlcnic_pf_set_interface_id_macaddr(adapter, interface_id); } else { if (!qlcnic_sriov_vf_check(adapter)) *interface_id = adapter->recv_ctx->context_id << 16; } } int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, u16 vlan_id, u8 op) { struct qlcnic_cmd_args *cmd = NULL; struct qlcnic_macvlan_mbx mv; u32 *buf, temp = 0; int err; if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) return -EIO; cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); if (!cmd) return -ENOMEM; err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN); if (err) goto out; cmd->type = QLC_83XX_MBX_CMD_NO_WAIT; if (vlan_id) op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ? 
QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL; cmd->req.arg[1] = op | (1 << 8); qlcnic_83xx_set_interface_id_macaddr(adapter, &temp); cmd->req.arg[1] |= temp; mv.vlan = vlan_id; mv.mac_addr0 = addr[0]; mv.mac_addr1 = addr[1]; mv.mac_addr2 = addr[2]; mv.mac_addr3 = addr[3]; mv.mac_addr4 = addr[4]; mv.mac_addr5 = addr[5]; buf = &cmd->req.arg[2]; memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx)); err = qlcnic_issue_cmd(adapter, cmd); if (!err) return err; qlcnic_free_mbx_args(cmd); out: kfree(cmd); return err; } void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr, u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring) { u8 mac[ETH_ALEN]; memcpy(&mac, addr, ETH_ALEN); qlcnic_83xx_sre_macaddr_change(adapter, mac, vlan_id, QLCNIC_MAC_ADD); } static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac, u8 type, struct qlcnic_cmd_args *cmd) { switch (type) { case QLCNIC_SET_STATION_MAC: case QLCNIC_SET_FAC_DEF_MAC: memcpy(&cmd->req.arg[2], mac, sizeof(u32)); memcpy(&cmd->req.arg[3], &mac[4], sizeof(u16)); break; } cmd->req.arg[1] = type; } int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac, u8 function) { int err, i; struct qlcnic_cmd_args cmd; u32 mac_low, mac_high; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS); if (err) return err; qlcnic_83xx_configure_mac(adapter, mac, QLCNIC_GET_CURRENT_MAC, &cmd); err = qlcnic_issue_cmd(adapter, &cmd); if (err == QLCNIC_RCODE_SUCCESS) { mac_low = cmd.rsp.arg[1]; mac_high = cmd.rsp.arg[2]; for (i = 0; i < 2; i++) mac[i] = (u8) (mac_high >> ((1 - i) * 8)); for (i = 2; i < 6; i++) mac[i] = (u8) (mac_low >> ((5 - i) * 8)); } else { dev_err(&adapter->pdev->dev, "Failed to get mac address%d\n", err); err = -EIO; } qlcnic_free_mbx_args(&cmd); return err; } static int qlcnic_83xx_set_rx_intr_coal(struct qlcnic_adapter *adapter) { struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal; struct qlcnic_cmd_args cmd; u16 temp; int err; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL); if (err) return err; temp = adapter->recv_ctx->context_id; cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16; temp = coal->rx_time_us; cmd.req.arg[2] = coal->rx_packets | temp << 16; cmd.req.arg[3] = coal->flag; err = qlcnic_issue_cmd(adapter, &cmd); if (err != QLCNIC_RCODE_SUCCESS) netdev_err(adapter->netdev, "failed to set interrupt coalescing parameters\n"); qlcnic_free_mbx_args(&cmd); return err; } static int qlcnic_83xx_set_tx_intr_coal(struct qlcnic_adapter *adapter) { struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal; struct qlcnic_cmd_args cmd; u16 temp; int err; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL); if (err) return err; temp = adapter->tx_ring->ctx_id; cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_TX | temp << 16; temp = coal->tx_time_us; cmd.req.arg[2] = coal->tx_packets | temp << 16; cmd.req.arg[3] = coal->flag; err = qlcnic_issue_cmd(adapter, &cmd); if (err != QLCNIC_RCODE_SUCCESS) netdev_err(adapter->netdev, "failed to set interrupt coalescing parameters\n"); qlcnic_free_mbx_args(&cmd); return err; } int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *adapter) { int err = 0; err = qlcnic_83xx_set_rx_intr_coal(adapter); if (err) netdev_err(adapter->netdev, "failed to set Rx coalescing parameters\n"); err = qlcnic_83xx_set_tx_intr_coal(adapter); if (err) netdev_err(adapter->netdev, "failed to set Tx coalescing parameters\n"); return err; } int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter, struct ethtool_coalesce 
*ethcoal) { struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal; u32 rx_coalesce_usecs, rx_max_frames; u32 tx_coalesce_usecs, tx_max_frames; int err; if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) return -EIO; tx_coalesce_usecs = ethcoal->tx_coalesce_usecs; tx_max_frames = ethcoal->tx_max_coalesced_frames; rx_coalesce_usecs = ethcoal->rx_coalesce_usecs; rx_max_frames = ethcoal->rx_max_coalesced_frames; coal->flag = QLCNIC_INTR_DEFAULT; if ((coal->rx_time_us == rx_coalesce_usecs) && (coal->rx_packets == rx_max_frames)) { coal->type = QLCNIC_INTR_COAL_TYPE_TX; coal->tx_time_us = tx_coalesce_usecs; coal->tx_packets = tx_max_frames; } else if ((coal->tx_time_us == tx_coalesce_usecs) && (coal->tx_packets == tx_max_frames)) { coal->type = QLCNIC_INTR_COAL_TYPE_RX; coal->rx_time_us = rx_coalesce_usecs; coal->rx_packets = rx_max_frames; } else { coal->type = QLCNIC_INTR_COAL_TYPE_RX_TX; coal->rx_time_us = rx_coalesce_usecs; coal->rx_packets = rx_max_frames; coal->tx_time_us = tx_coalesce_usecs; coal->tx_packets = tx_max_frames; } switch (coal->type) { case QLCNIC_INTR_COAL_TYPE_RX: err = qlcnic_83xx_set_rx_intr_coal(adapter); break; case QLCNIC_INTR_COAL_TYPE_TX: err = qlcnic_83xx_set_tx_intr_coal(adapter); break; case QLCNIC_INTR_COAL_TYPE_RX_TX: err = qlcnic_83xx_set_rx_tx_intr_coal(adapter); break; default: err = -EINVAL; netdev_err(adapter->netdev, "Invalid Interrupt coalescing type\n"); break; } return err; } static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter, u32 data[]) { struct qlcnic_hardware_context *ahw = adapter->ahw; u8 link_status, duplex; /* link speed */ link_status = LSB(data[3]) & 1; if (link_status) { ahw->link_speed = MSW(data[2]); duplex = LSB(MSW(data[3])); if (duplex) ahw->link_duplex = DUPLEX_FULL; else ahw->link_duplex = DUPLEX_HALF; } else { ahw->link_speed = SPEED_UNKNOWN; ahw->link_duplex = DUPLEX_UNKNOWN; } ahw->link_autoneg = MSB(MSW(data[3])); ahw->module_type = MSB(LSW(data[3])); ahw->has_link_events = 1; ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK; qlcnic_advert_link_change(adapter, link_status); } static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data) { u32 mask, resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED; struct qlcnic_adapter *adapter = data; struct qlcnic_mailbox *mbx; unsigned long flags; mbx = adapter->ahw->mailbox; spin_lock_irqsave(&mbx->aen_lock, flags); resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL); if (!(resp & QLCNIC_SET_OWNER)) goto out; event = readl(QLCNIC_MBX_FW(adapter->ahw, 0)); if (event & QLCNIC_MBX_ASYNC_EVENT) { __qlcnic_83xx_process_aen(adapter); } else { if (mbx->rsp_status != rsp_status) qlcnic_83xx_notify_mbx_response(mbx); else adapter->stats.mbx_spurious_intr++; } out: mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK); writel(0, adapter->ahw->pci_base0 + mask); spin_unlock_irqrestore(&mbx->aen_lock, flags); return IRQ_HANDLED; } int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic) { int i, err = -EIO; struct qlcnic_cmd_args cmd; if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) { dev_err(&adapter->pdev->dev, "%s: Error, invoked by non management func\n", __func__); return err; } err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); if (err) return err; cmd.req.arg[1] = (nic->pci_func << 16); cmd.req.arg[2] = 0x1 << 16; cmd.req.arg[3] = nic->phys_port | (nic->switch_mode << 16); cmd.req.arg[4] = nic->capabilities; cmd.req.arg[5] = (nic->max_mac_filters & 0xFF) | ((nic->max_mtu) << 16); cmd.req.arg[6] = (nic->max_tx_ques) | 
((nic->max_rx_ques) << 16); cmd.req.arg[7] = (nic->min_tx_bw) | ((nic->max_tx_bw) << 16); for (i = 8; i < 32; i++) cmd.req.arg[i] = 0; err = qlcnic_issue_cmd(adapter, &cmd); if (err != QLCNIC_RCODE_SUCCESS) { dev_err(&adapter->pdev->dev, "Failed to set nic info%d\n", err); err = -EIO; } qlcnic_free_mbx_args(&cmd); return err; } int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *npar_info, u8 func_id) { int err; u32 temp; u8 op = 0; struct qlcnic_cmd_args cmd; struct qlcnic_hardware_context *ahw = adapter->ahw; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO); if (err) return err; if (func_id != ahw->pci_func) { temp = func_id << 16; cmd.req.arg[1] = op | BIT_31 | temp; } else { cmd.req.arg[1] = ahw->pci_func << 16; } err = qlcnic_issue_cmd(adapter, &cmd); if (err) { dev_info(&adapter->pdev->dev, "Failed to get nic info %d\n", err); goto out; } npar_info->op_type = cmd.rsp.arg[1]; npar_info->pci_func = cmd.rsp.arg[2] & 0xFFFF; npar_info->op_mode = (cmd.rsp.arg[2] & 0xFFFF0000) >> 16; npar_info->phys_port = cmd.rsp.arg[3] & 0xFFFF; npar_info->switch_mode = (cmd.rsp.arg[3] & 0xFFFF0000) >> 16; npar_info->capabilities = cmd.rsp.arg[4]; npar_info->max_mac_filters = cmd.rsp.arg[5] & 0xFF; npar_info->max_mtu = (cmd.rsp.arg[5] & 0xFFFF0000) >> 16; npar_info->max_tx_ques = cmd.rsp.arg[6] & 0xFFFF; npar_info->max_rx_ques = (cmd.rsp.arg[6] & 0xFFFF0000) >> 16; npar_info->min_tx_bw = cmd.rsp.arg[7] & 0xFFFF; npar_info->max_tx_bw = (cmd.rsp.arg[7] & 0xFFFF0000) >> 16; if (cmd.rsp.arg[8] & 0x1) npar_info->max_bw_reg_offset = (cmd.rsp.arg[8] & 0x7FFE) >> 1; if (cmd.rsp.arg[8] & 0x10000) { temp = (cmd.rsp.arg[8] & 0x7FFE0000) >> 17; npar_info->max_linkspeed_reg_offset = temp; } memcpy(ahw->extra_capability, &cmd.rsp.arg[16], sizeof(ahw->extra_capability)); out: qlcnic_free_mbx_args(&cmd); return err; } int qlcnic_get_pci_func_type(struct qlcnic_adapter *adapter, u16 type, u16 *nic, u16 *fcoe, u16 *iscsi) { struct device *dev = &adapter->pdev->dev; int err = 0; switch (type) { case QLCNIC_TYPE_NIC: (*nic)++; break; case QLCNIC_TYPE_FCOE: (*fcoe)++; break; case QLCNIC_TYPE_ISCSI: (*iscsi)++; break; default: dev_err(dev, "%s: Unknown PCI type[%x]\n", __func__, type); err = -EIO; } return err; } int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter, struct qlcnic_pci_info *pci_info) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct device *dev = &adapter->pdev->dev; u16 nic = 0, fcoe = 0, iscsi = 0; struct qlcnic_cmd_args cmd; int i, err = 0, j = 0; u32 temp; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO); if (err) return err; err = qlcnic_issue_cmd(adapter, &cmd); ahw->total_nic_func = 0; if (err == QLCNIC_RCODE_SUCCESS) { ahw->max_pci_func = cmd.rsp.arg[1] & 0xFF; for (i = 2, j = 0; j < ahw->max_vnic_func; j++, pci_info++) { pci_info->id = cmd.rsp.arg[i] & 0xFFFF; pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16; i++; if (!pci_info->active) { i += QLC_SKIP_INACTIVE_PCI_REGS; continue; } pci_info->type = cmd.rsp.arg[i] & 0xFFFF; err = qlcnic_get_pci_func_type(adapter, pci_info->type, &nic, &fcoe, &iscsi); temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16; pci_info->default_port = temp; i++; pci_info->tx_min_bw = cmd.rsp.arg[i] & 0xFFFF; temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16; pci_info->tx_max_bw = temp; i = i + 2; memcpy(pci_info->mac, &cmd.rsp.arg[i], ETH_ALEN - 2); i++; memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2); i = i + 3; } } else { dev_err(dev, "Failed to get PCI Info, error = %d\n", err); err = -EIO; 
} ahw->total_nic_func = nic; ahw->total_pci_func = nic + fcoe + iscsi; if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) { dev_err(dev, "%s: Invalid function count: total nic func[%x], total pci func[%x]\n", __func__, ahw->total_nic_func, ahw->total_pci_func); err = -EIO; } qlcnic_free_mbx_args(&cmd); return err; } int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type) { int i, index, err; u8 max_ints; u32 val, temp, type; struct qlcnic_cmd_args cmd; max_ints = adapter->ahw->num_msix - 1; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT); if (err) return err; cmd.req.arg[1] = max_ints; if (qlcnic_sriov_vf_check(adapter)) cmd.req.arg[1] |= (adapter->ahw->pci_func << 8) | BIT_16; for (i = 0, index = 2; i < max_ints; i++) { type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL; val = type | (adapter->ahw->intr_tbl[i].type << 4); if (adapter->ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX) val |= (adapter->ahw->intr_tbl[i].id << 16); cmd.req.arg[index++] = val; } err = qlcnic_issue_cmd(adapter, &cmd); if (err) { dev_err(&adapter->pdev->dev, "Failed to configure interrupts 0x%x\n", err); goto out; } max_ints = cmd.rsp.arg[1]; for (i = 0, index = 2; i < max_ints; i++, index += 2) { val = cmd.rsp.arg[index]; if (LSB(val)) { dev_info(&adapter->pdev->dev, "Can't configure interrupt %d\n", adapter->ahw->intr_tbl[i].id); continue; } if (op_type) { adapter->ahw->intr_tbl[i].id = MSW(val); adapter->ahw->intr_tbl[i].enabled = 1; temp = cmd.rsp.arg[index + 1]; adapter->ahw->intr_tbl[i].src = temp; } else { adapter->ahw->intr_tbl[i].id = i; adapter->ahw->intr_tbl[i].enabled = 0; adapter->ahw->intr_tbl[i].src = 0; } } out: qlcnic_free_mbx_args(&cmd); return err; } int qlcnic_83xx_lock_flash(struct qlcnic_adapter *adapter) { int id, timeout = 0; u32 status = 0; while (status == 0) { status = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK); if (status) break; if (++timeout >= QLC_83XX_FLASH_LOCK_TIMEOUT) { id = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK_OWNER); dev_err(&adapter->pdev->dev, "%s: failed, lock held by %d\n", __func__, id); return -EIO; } usleep_range(1000, 2000); } QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, adapter->portnum); return 0; } void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *adapter) { QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK); QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, 0xFF); } int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr, u8 *p_data, int count) { u32 word, range, flash_offset, addr = flash_addr, ret; ulong indirect_add, direct_window; int i, err = 0; flash_offset = addr & (QLCNIC_FLASH_SECTOR_SIZE - 1); if (addr & 0x3) { dev_err(&adapter->pdev->dev, "Illegal addr = 0x%x\n", addr); return -EIO; } qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_DIRECT_WINDOW, (addr & 0xFFFF0000)); range = flash_offset + (count * sizeof(u32)); /* Check if data is spread across multiple sectors */ if (range > (QLCNIC_FLASH_SECTOR_SIZE - 1)) { /* Multi sector read */ for (i = 0; i < count; i++) { indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr); ret = QLCRD32(adapter, indirect_add, &err); if (err == -EIO) return err; word = ret; *(u32 *)p_data = word; p_data = p_data + 4; addr = addr + 4; flash_offset = flash_offset + 4; if (flash_offset > (QLCNIC_FLASH_SECTOR_SIZE - 1)) { direct_window = QLC_83XX_FLASH_DIRECT_WINDOW; /* This write is needed once for each sector */ qlcnic_83xx_wrt_reg_indirect(adapter, direct_window, (addr)); flash_offset = 0; } } } else { /* Single sector read 
*/ for (i = 0; i < count; i++) { indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr); ret = QLCRD32(adapter, indirect_add, &err); if (err == -EIO) return err; word = ret; *(u32 *)p_data = word; p_data = p_data + 4; addr = addr + 4; } } return 0; } static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter) { u32 status; int retries = QLC_83XX_FLASH_READ_RETRY_COUNT; int err = 0; do { status = QLCRD32(adapter, QLC_83XX_FLASH_STATUS, &err); if (err == -EIO) return err; if ((status & QLC_83XX_FLASH_STATUS_READY) == QLC_83XX_FLASH_STATUS_READY) break; usleep_range(1000, 1100); } while (--retries); if (!retries) return -EIO; return 0; } int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *adapter) { int ret; u32 cmd; cmd = adapter->ahw->fdt.write_statusreg_cmd; qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, (QLC_83XX_FLASH_FDT_WRITE_DEF_SIG | cmd)); qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, adapter->ahw->fdt.write_enable_bits); qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, QLC_83XX_FLASH_SECOND_ERASE_MS_VAL); ret = qlcnic_83xx_poll_flash_status_reg(adapter); if (ret) return -EIO; return 0; } int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *adapter) { int ret; qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, (QLC_83XX_FLASH_FDT_WRITE_DEF_SIG | adapter->ahw->fdt.write_statusreg_cmd)); qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, adapter->ahw->fdt.write_disable_bits); qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, QLC_83XX_FLASH_SECOND_ERASE_MS_VAL); ret = qlcnic_83xx_poll_flash_status_reg(adapter); if (ret) return -EIO; return 0; } int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter) { int ret, err = 0; u32 mfg_id; if (qlcnic_83xx_lock_flash(adapter)) return -EIO; qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, QLC_83XX_FLASH_FDT_READ_MFG_ID_VAL); qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, QLC_83XX_FLASH_READ_CTRL); ret = qlcnic_83xx_poll_flash_status_reg(adapter); if (ret) { qlcnic_83xx_unlock_flash(adapter); return -EIO; } mfg_id = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err); if (err == -EIO) { qlcnic_83xx_unlock_flash(adapter); return err; } adapter->flash_mfg_id = (mfg_id & 0xFF); qlcnic_83xx_unlock_flash(adapter); return 0; } int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *adapter) { int count, fdt_size, ret = 0; fdt_size = sizeof(struct qlcnic_fdt); count = fdt_size / sizeof(u32); if (qlcnic_83xx_lock_flash(adapter)) return -EIO; memset(&adapter->ahw->fdt, 0, fdt_size); ret = qlcnic_83xx_lockless_flash_read32(adapter, QLCNIC_FDT_LOCATION, (u8 *)&adapter->ahw->fdt, count); qlcnic_swap32_buffer((u32 *)&adapter->ahw->fdt, count); qlcnic_83xx_unlock_flash(adapter); return ret; } int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter, u32 sector_start_addr) { u32 reversed_addr, addr1, addr2, cmd; int ret = -EIO; if (qlcnic_83xx_lock_flash(adapter) != 0) return -EIO; if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { ret = qlcnic_83xx_enable_flash_write(adapter); if (ret) { qlcnic_83xx_unlock_flash(adapter); dev_err(&adapter->pdev->dev, "%s failed at %d\n", __func__, __LINE__); return ret; } } ret = qlcnic_83xx_poll_flash_status_reg(adapter); if (ret) { qlcnic_83xx_unlock_flash(adapter); dev_err(&adapter->pdev->dev, "%s: failed at %d\n", __func__, __LINE__); return -EIO; } addr1 = (sector_start_addr & 0xFF) << 16; addr2 = (sector_start_addr & 0xFF0000) >> 16; reversed_addr = addr1 | addr2 | 
(sector_start_addr & 0xFF00); qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, reversed_addr); cmd = QLC_83XX_FLASH_FDT_ERASE_DEF_SIG | adapter->ahw->fdt.erase_cmd; if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, cmd); else qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, QLC_83XX_FLASH_OEM_ERASE_SIG); qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, QLC_83XX_FLASH_LAST_ERASE_MS_VAL); ret = qlcnic_83xx_poll_flash_status_reg(adapter); if (ret) { qlcnic_83xx_unlock_flash(adapter); dev_err(&adapter->pdev->dev, "%s: failed at %d\n", __func__, __LINE__); return -EIO; } if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { ret = qlcnic_83xx_disable_flash_write(adapter); if (ret) { qlcnic_83xx_unlock_flash(adapter); dev_err(&adapter->pdev->dev, "%s: failed at %d\n", __func__, __LINE__); return ret; } } qlcnic_83xx_unlock_flash(adapter); return 0; } int qlcnic_83xx_flash_write32(struct qlcnic_adapter *adapter, u32 addr, u32 *p_data) { int ret = -EIO; u32 addr1 = 0x00800000 | (addr >> 2); qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, addr1); qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data); qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, QLC_83XX_FLASH_LAST_ERASE_MS_VAL); ret = qlcnic_83xx_poll_flash_status_reg(adapter); if (ret) { dev_err(&adapter->pdev->dev, "%s: failed at %d\n", __func__, __LINE__); return -EIO; } return 0; } int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr, u32 *p_data, int count) { u32 temp; int ret = -EIO, err = 0; if ((count < QLC_83XX_FLASH_WRITE_MIN) || (count > QLC_83XX_FLASH_WRITE_MAX)) { dev_err(&adapter->pdev->dev, "%s: Invalid word count\n", __func__); return -EIO; } temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err); if (err == -EIO) return err; qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL, (temp | QLC_83XX_FLASH_SPI_CTRL)); qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, QLC_83XX_FLASH_ADDR_TEMP_VAL); /* First DWORD write */ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data++); qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, QLC_83XX_FLASH_FIRST_MS_PATTERN); ret = qlcnic_83xx_poll_flash_status_reg(adapter); if (ret) { dev_err(&adapter->pdev->dev, "%s: failed at %d\n", __func__, __LINE__); return -EIO; } count--; qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, QLC_83XX_FLASH_ADDR_SECOND_TEMP_VAL); /* Second to N-1 DWORD writes */ while (count != 1) { qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data++); qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, QLC_83XX_FLASH_SECOND_MS_PATTERN); ret = qlcnic_83xx_poll_flash_status_reg(adapter); if (ret) { dev_err(&adapter->pdev->dev, "%s: failed at %d\n", __func__, __LINE__); return -EIO; } count--; } qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, QLC_83XX_FLASH_ADDR_TEMP_VAL | (addr >> 2)); /* Last DWORD write */ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data++); qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, QLC_83XX_FLASH_LAST_MS_PATTERN); ret = qlcnic_83xx_poll_flash_status_reg(adapter); if (ret) { dev_err(&adapter->pdev->dev, "%s: failed at %d\n", __func__, __LINE__); return -EIO; } ret = QLCRD32(adapter, QLC_83XX_FLASH_SPI_STATUS, &err); if (err == -EIO) return err; if ((ret & QLC_83XX_FLASH_SPI_CTRL) == QLC_83XX_FLASH_SPI_CTRL) { dev_err(&adapter->pdev->dev, "%s: failed at %d\n", 
__func__, __LINE__); /* Operation failed, clear error bit */ temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err); if (err == -EIO) return err; qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL, (temp | QLC_83XX_FLASH_SPI_CTRL)); } return 0; } static void qlcnic_83xx_recover_driver_lock(struct qlcnic_adapter *adapter) { u32 val, id; val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK); /* Check if recovery need to be performed by the calling function */ if ((val & QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK) == 0) { val = val & ~0x3F; val = val | ((adapter->portnum << 2) | QLC_83XX_NEED_DRV_LOCK_RECOVERY); QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val); dev_info(&adapter->pdev->dev, "%s: lock recovery initiated\n", __func__); mdelay(QLC_83XX_DRV_LOCK_RECOVERY_DELAY); val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK); id = ((val >> 2) & 0xF); if (id == adapter->portnum) { val = val & ~QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK; val = val | QLC_83XX_DRV_LOCK_RECOVERY_IN_PROGRESS; QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val); /* Force release the lock */ QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK); /* Clear recovery bits */ val = val & ~0x3F; QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val); dev_info(&adapter->pdev->dev, "%s: lock recovery completed\n", __func__); } else { dev_info(&adapter->pdev->dev, "%s: func %d to resume lock recovery process\n", __func__, id); } } else { dev_info(&adapter->pdev->dev, "%s: lock recovery initiated by other functions\n", __func__); } } int qlcnic_83xx_lock_driver(struct qlcnic_adapter *adapter) { u32 lock_alive_counter, val, id, i = 0, status = 0, temp = 0; int max_attempt = 0; while (status == 0) { status = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK); if (status) break; mdelay(QLC_83XX_DRV_LOCK_WAIT_DELAY); i++; if (i == 1) temp = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID); if (i == QLC_83XX_DRV_LOCK_WAIT_COUNTER) { val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID); if (val == temp) { id = val & 0xFF; dev_info(&adapter->pdev->dev, "%s: lock to be recovered from %d\n", __func__, id); qlcnic_83xx_recover_driver_lock(adapter); i = 0; max_attempt++; } else { dev_err(&adapter->pdev->dev, "%s: failed to get lock\n", __func__); return -EIO; } } /* Force exit from while loop after few attempts */ if (max_attempt == QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT) { dev_err(&adapter->pdev->dev, "%s: failed to get lock\n", __func__); return -EIO; } } val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID); lock_alive_counter = val >> 8; lock_alive_counter++; val = lock_alive_counter << 8 | adapter->portnum; QLCWRX(adapter->ahw, QLC_83XX_DRV_LOCK_ID, val); return 0; } void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *adapter) { u32 val, lock_alive_counter, id; val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID); id = val & 0xFF; lock_alive_counter = val >> 8; if (id != adapter->portnum) dev_err(&adapter->pdev->dev, "%s:Warning func %d is unlocking lock owned by %d\n", __func__, adapter->portnum, id); val = (lock_alive_counter << 8) | 0xFF; QLCWRX(adapter->ahw, QLC_83XX_DRV_LOCK_ID, val); QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK); } int qlcnic_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr, u32 *data, u32 count) { int i, j, ret = 0; u32 temp; /* Check alignment */ if (addr & 0xF) return -EIO; mutex_lock(&adapter->ahw->mem_lock); qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0); for (i = 0; i < count; i++, addr += 16) { if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET, QLCNIC_ADDR_QDR_NET_MAX)) || (ADDR_IN_RANGE(addr, QLCNIC_ADDR_DDR_NET, 
QLCNIC_ADDR_DDR_NET_MAX)))) { mutex_unlock(&adapter->ahw->mem_lock); return -EIO; } qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr); qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_LO, *data++); qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_HI, *data++); qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_ULO, *data++); qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_UHI, *data++); qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_ENABLE); qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_START); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL); if ((temp & TA_CTL_BUSY) == 0) break; } /* Status check failure */ if (j >= MAX_CTL_CHECK) { printk_ratelimited(KERN_WARNING "MS memory write failed\n"); mutex_unlock(&adapter->ahw->mem_lock); return -EIO; } } mutex_unlock(&adapter->ahw->mem_lock); return ret; } int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr, u8 *p_data, int count) { u32 word, addr = flash_addr, ret; ulong indirect_addr; int i, err = 0; if (qlcnic_83xx_lock_flash(adapter) != 0) return -EIO; if (addr & 0x3) { dev_err(&adapter->pdev->dev, "Illegal addr = 0x%x\n", addr); qlcnic_83xx_unlock_flash(adapter); return -EIO; } for (i = 0; i < count; i++) { if (qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_DIRECT_WINDOW, (addr))) { qlcnic_83xx_unlock_flash(adapter); return -EIO; } indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr); ret = QLCRD32(adapter, indirect_addr, &err); if (err == -EIO) { qlcnic_83xx_unlock_flash(adapter); return err; } word = ret; *(u32 *)p_data = word; p_data = p_data + 4; addr = addr + 4; } qlcnic_83xx_unlock_flash(adapter); return 0; } void qlcnic_83xx_get_port_type(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_cmd_args cmd; u32 config; int err; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS); if (err) return; err = qlcnic_issue_cmd(adapter, &cmd); if (err) { dev_info(&adapter->pdev->dev, "Get Link Status Command failed: 0x%x\n", err); goto out; } else { config = cmd.rsp.arg[3]; switch (QLC_83XX_SFP_MODULE_TYPE(config)) { case QLC_83XX_MODULE_FIBRE_1000BASE_SX: case QLC_83XX_MODULE_FIBRE_1000BASE_LX: case QLC_83XX_MODULE_FIBRE_1000BASE_CX: case QLC_83XX_MODULE_TP_1000BASE_T: ahw->port_type = QLCNIC_GBE; break; default: ahw->port_type = QLCNIC_XGBE; } } out: qlcnic_free_mbx_args(&cmd); } int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) { u8 pci_func; int err; u32 config = 0, state; struct qlcnic_cmd_args cmd; struct qlcnic_hardware_context *ahw = adapter->ahw; if (qlcnic_sriov_vf_check(adapter)) pci_func = adapter->portnum; else pci_func = ahw->pci_func; state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(pci_func)); if (!QLC_83xx_FUNC_VAL(state, pci_func)) { dev_info(&adapter->pdev->dev, "link state down\n"); return config; } err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS); if (err) return err; err = qlcnic_issue_cmd(adapter, &cmd); if (err) { dev_info(&adapter->pdev->dev, "Get Link Status Command failed: 0x%x\n", err); goto out; } else { config = cmd.rsp.arg[1]; switch (QLC_83XX_CURRENT_LINK_SPEED(config)) { case QLC_83XX_10M_LINK: ahw->link_speed = SPEED_10; break; case QLC_83XX_100M_LINK: ahw->link_speed = SPEED_100; break; case QLC_83XX_1G_LINK: ahw->link_speed = SPEED_1000; break; case QLC_83XX_10G_LINK: ahw->link_speed = SPEED_10000; break; default: ahw->link_speed = 0; break; } config = cmd.rsp.arg[3]; switch (QLC_83XX_SFP_MODULE_TYPE(config)) { case QLC_83XX_MODULE_FIBRE_10GBASE_LRM: case 
QLC_83XX_MODULE_FIBRE_10GBASE_LR: case QLC_83XX_MODULE_FIBRE_10GBASE_SR: ahw->supported_type = PORT_FIBRE; ahw->port_type = QLCNIC_XGBE; break; case QLC_83XX_MODULE_FIBRE_1000BASE_SX: case QLC_83XX_MODULE_FIBRE_1000BASE_LX: case QLC_83XX_MODULE_FIBRE_1000BASE_CX: ahw->supported_type = PORT_FIBRE; ahw->port_type = QLCNIC_GBE; break; case QLC_83XX_MODULE_TP_1000BASE_T: ahw->supported_type = PORT_TP; ahw->port_type = QLCNIC_GBE; break; case QLC_83XX_MODULE_DA_10GE_PASSIVE_CP: case QLC_83XX_MODULE_DA_10GE_ACTIVE_CP: case QLC_83XX_MODULE_DA_10GE_LEGACY_CP: case QLC_83XX_MODULE_DA_1GE_PASSIVE_CP: ahw->supported_type = PORT_DA; ahw->port_type = QLCNIC_XGBE; break; default: ahw->supported_type = PORT_OTHER; ahw->port_type = QLCNIC_XGBE; } if (config & 1) err = 1; } out: qlcnic_free_mbx_args(&cmd); return config; } int qlcnic_83xx_get_link_ksettings(struct qlcnic_adapter *adapter, struct ethtool_link_ksettings *ecmd) { struct qlcnic_hardware_context *ahw = adapter->ahw; u32 config = 0; int status = 0; u32 supported, advertising; if (!test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) { /* Get port configuration info */ status = qlcnic_83xx_get_port_info(adapter); /* Get Link Status related info */ config = qlcnic_83xx_test_link(adapter); ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config); } /* hard code until there is a way to get it from flash */ ahw->board_type = QLCNIC_BRDTYPE_83XX_10G; if (netif_running(adapter->netdev) && ahw->has_link_events) { ecmd->base.speed = ahw->link_speed; ecmd->base.duplex = ahw->link_duplex; ecmd->base.autoneg = ahw->link_autoneg; } else { ecmd->base.speed = SPEED_UNKNOWN; ecmd->base.duplex = DUPLEX_UNKNOWN; ecmd->base.autoneg = AUTONEG_DISABLE; } supported = (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full | SUPPORTED_Autoneg); ethtool_convert_link_mode_to_legacy_u32(&advertising, ecmd->link_modes.advertising); if (ecmd->base.autoneg == AUTONEG_ENABLE) { if (ahw->port_config & QLC_83XX_10_CAPABLE) advertising |= SUPPORTED_10baseT_Full; if (ahw->port_config & QLC_83XX_100_CAPABLE) advertising |= SUPPORTED_100baseT_Full; if (ahw->port_config & QLC_83XX_1G_CAPABLE) advertising |= SUPPORTED_1000baseT_Full; if (ahw->port_config & QLC_83XX_10G_CAPABLE) advertising |= SUPPORTED_10000baseT_Full; if (ahw->port_config & QLC_83XX_AUTONEG_ENABLE) advertising |= ADVERTISED_Autoneg; } else { switch (ahw->link_speed) { case SPEED_10: advertising = SUPPORTED_10baseT_Full; break; case SPEED_100: advertising = SUPPORTED_100baseT_Full; break; case SPEED_1000: advertising = SUPPORTED_1000baseT_Full; break; case SPEED_10000: advertising = SUPPORTED_10000baseT_Full; break; default: break; } } switch (ahw->supported_type) { case PORT_FIBRE: supported |= SUPPORTED_FIBRE; advertising |= ADVERTISED_FIBRE; ecmd->base.port = PORT_FIBRE; break; case PORT_TP: supported |= SUPPORTED_TP; advertising |= ADVERTISED_TP; ecmd->base.port = PORT_TP; break; case PORT_DA: supported |= SUPPORTED_FIBRE; advertising |= ADVERTISED_FIBRE; ecmd->base.port = PORT_DA; break; default: supported |= SUPPORTED_FIBRE; advertising |= ADVERTISED_FIBRE; ecmd->base.port = PORT_OTHER; break; } ecmd->base.phy_address = ahw->physical_port; ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising, advertising); return status; } int qlcnic_83xx_set_link_ksettings(struct qlcnic_adapter *adapter, const struct ethtool_link_ksettings *ecmd) { struct qlcnic_hardware_context *ahw = 
adapter->ahw; u32 config = adapter->ahw->port_config; int status = 0; /* 83xx devices do not support Half duplex */ if (ecmd->base.duplex == DUPLEX_HALF) { netdev_info(adapter->netdev, "Half duplex mode not supported\n"); return -EINVAL; } if (ecmd->base.autoneg) { ahw->port_config |= QLC_83XX_AUTONEG_ENABLE; ahw->port_config |= (QLC_83XX_100_CAPABLE | QLC_83XX_1G_CAPABLE | QLC_83XX_10G_CAPABLE); } else { /* force speed */ ahw->port_config &= ~QLC_83XX_AUTONEG_ENABLE; switch (ecmd->base.speed) { case SPEED_10: ahw->port_config &= ~(QLC_83XX_100_CAPABLE | QLC_83XX_1G_CAPABLE | QLC_83XX_10G_CAPABLE); ahw->port_config |= QLC_83XX_10_CAPABLE; break; case SPEED_100: ahw->port_config &= ~(QLC_83XX_10_CAPABLE | QLC_83XX_1G_CAPABLE | QLC_83XX_10G_CAPABLE); ahw->port_config |= QLC_83XX_100_CAPABLE; break; case SPEED_1000: ahw->port_config &= ~(QLC_83XX_10_CAPABLE | QLC_83XX_100_CAPABLE | QLC_83XX_10G_CAPABLE); ahw->port_config |= QLC_83XX_1G_CAPABLE; break; case SPEED_10000: ahw->port_config &= ~(QLC_83XX_10_CAPABLE | QLC_83XX_100_CAPABLE | QLC_83XX_1G_CAPABLE); ahw->port_config |= QLC_83XX_10G_CAPABLE; break; default: return -EINVAL; } } status = qlcnic_83xx_set_port_config(adapter); if (status) { netdev_info(adapter->netdev, "Failed to Set Link Speed and autoneg.\n"); ahw->port_config = config; } return status; } static inline u64 *qlcnic_83xx_copy_stats(struct qlcnic_cmd_args *cmd, u64 *data, int index) { u32 low, hi; u64 val; low = cmd->rsp.arg[index]; hi = cmd->rsp.arg[index + 1]; val = (((u64) low) | (((u64) hi) << 32)); *data++ = val; return data; } static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd, u64 *data, int type, int *ret) { int err, k, total_regs; *ret = 0; err = qlcnic_issue_cmd(adapter, cmd); if (err != QLCNIC_RCODE_SUCCESS) { dev_info(&adapter->pdev->dev, "Error in get statistics mailbox command\n"); *ret = -EIO; return data; } total_regs = cmd->rsp.num; switch (type) { case QLC_83XX_STAT_MAC: /* fill in MAC tx counters */ for (k = 2; k < 28; k += 2) data = qlcnic_83xx_copy_stats(cmd, data, k); /* skip 24 bytes of reserved area */ /* fill in MAC rx counters */ for (k += 6; k < 60; k += 2) data = qlcnic_83xx_copy_stats(cmd, data, k); /* skip 24 bytes of reserved area */ /* fill in MAC rx frame stats */ for (k += 6; k < 80; k += 2) data = qlcnic_83xx_copy_stats(cmd, data, k); /* fill in eSwitch stats */ for (; k < total_regs; k += 2) data = qlcnic_83xx_copy_stats(cmd, data, k); break; case QLC_83XX_STAT_RX: for (k = 2; k < 8; k += 2) data = qlcnic_83xx_copy_stats(cmd, data, k); /* skip 8 bytes of reserved data */ for (k += 2; k < 24; k += 2) data = qlcnic_83xx_copy_stats(cmd, data, k); /* skip 8 bytes containing RE1FBQ error data */ for (k += 2; k < total_regs; k += 2) data = qlcnic_83xx_copy_stats(cmd, data, k); break; case QLC_83XX_STAT_TX: for (k = 2; k < 10; k += 2) data = qlcnic_83xx_copy_stats(cmd, data, k); /* skip 8 bytes of reserved data */ for (k += 2; k < total_regs; k += 2) data = qlcnic_83xx_copy_stats(cmd, data, k); break; default: dev_warn(&adapter->pdev->dev, "Unknown get statistics mode\n"); *ret = -EIO; } return data; } void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) { struct qlcnic_cmd_args cmd; struct net_device *netdev = adapter->netdev; int ret = 0; ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS); if (ret) return; /* Get Tx stats */ cmd.req.arg[1] = BIT_1 | (adapter->tx_ring->ctx_id << 16); cmd.rsp.num = QLC_83XX_TX_STAT_REGS; data = qlcnic_83xx_fill_stats(adapter, &cmd, 
data, QLC_83XX_STAT_TX, &ret); if (ret) { netdev_err(netdev, "Error getting Tx stats\n"); goto out; } /* Get MAC stats */ cmd.req.arg[1] = BIT_2 | (adapter->portnum << 16); cmd.rsp.num = QLC_83XX_MAC_STAT_REGS; memset(cmd.rsp.arg, 0, sizeof(u32) * cmd.rsp.num); data = qlcnic_83xx_fill_stats(adapter, &cmd, data, QLC_83XX_STAT_MAC, &ret); if (ret) { netdev_err(netdev, "Error getting MAC stats\n"); goto out; } /* Get Rx stats */ cmd.req.arg[1] = adapter->recv_ctx->context_id << 16; cmd.rsp.num = QLC_83XX_RX_STAT_REGS; memset(cmd.rsp.arg, 0, sizeof(u32) * cmd.rsp.num); data = qlcnic_83xx_fill_stats(adapter, &cmd, data, QLC_83XX_STAT_RX, &ret); if (ret) netdev_err(netdev, "Error getting Rx stats\n"); out: qlcnic_free_mbx_args(&cmd); } #define QLCNIC_83XX_ADD_PORT0 BIT_0 #define QLCNIC_83XX_ADD_PORT1 BIT_1 #define QLCNIC_83XX_EXTENDED_MEM_SIZE 13 /* In MB */ int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *adapter) { struct qlcnic_cmd_args cmd; int err; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP); if (err) return err; cmd.req.arg[1] = (QLCNIC_83XX_ADD_PORT0 | QLCNIC_83XX_ADD_PORT1); cmd.req.arg[2] = QLCNIC_83XX_EXTENDED_MEM_SIZE; cmd.req.arg[3] = QLCNIC_83XX_EXTENDED_MEM_SIZE; err = qlcnic_issue_cmd(adapter, &cmd); if (err) dev_err(&adapter->pdev->dev, "failed to issue extend iSCSI minidump capability\n"); return err; } int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter) { u32 major, minor, sub; major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR); minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR); sub = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB); if (adapter->fw_version != QLCNIC_VERSION_CODE(major, minor, sub)) { dev_info(&adapter->pdev->dev, "%s: Reg test failed\n", __func__); return 1; } return 0; } inline int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter) { return (ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl) * sizeof(*adapter->ahw->ext_reg_tbl)) + (ARRAY_SIZE(qlcnic_83xx_reg_tbl) * sizeof(*adapter->ahw->reg_tbl)); } int qlcnic_83xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff) { int i, j = 0; for (i = QLCNIC_DEV_INFO_SIZE + 1; j < ARRAY_SIZE(qlcnic_83xx_reg_tbl); i++, j++) regs_buff[i] = QLC_SHARED_REG_RD32(adapter, j); for (j = 0; j < ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl); j++) regs_buff[i++] = QLCRDX(adapter->ahw, j); return i; } int qlcnic_83xx_interrupt_test(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_cmd_args cmd; u8 val, drv_sds_rings = adapter->drv_sds_rings; u8 drv_tx_rings = adapter->drv_tx_rings; u32 data; u16 intrpt_id, id; int ret; if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { netdev_info(netdev, "Device is resetting\n"); return -EBUSY; } if (qlcnic_get_diag_lock(adapter)) { netdev_info(netdev, "Device in diagnostics mode\n"); return -EBUSY; } ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST, drv_sds_rings); if (ret) goto fail_diag_irq; ahw->diag_cnt = 0; ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); if (ret) goto fail_mbx_args; if (adapter->flags & QLCNIC_MSIX_ENABLED) intrpt_id = ahw->intr_tbl[0].id; else intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID); cmd.req.arg[1] = 1; cmd.req.arg[2] = intrpt_id; cmd.req.arg[3] = BIT_0; ret = qlcnic_issue_cmd(adapter, &cmd); data = cmd.rsp.arg[2]; id = LSW(data); val = LSB(MSW(data)); if (id != intrpt_id) dev_info(&adapter->pdev->dev, "Interrupt generated: 0x%x, requested:0x%x\n", id, intrpt_id); if (val) 
dev_err(&adapter->pdev->dev, "Interrupt test error: 0x%x\n", val); if (ret) goto done; msleep(20); ret = !ahw->diag_cnt; done: qlcnic_free_mbx_args(&cmd); fail_mbx_args: qlcnic_83xx_diag_free_res(netdev, drv_sds_rings); fail_diag_irq: adapter->drv_sds_rings = drv_sds_rings; adapter->drv_tx_rings = drv_tx_rings; qlcnic_release_diag_lock(adapter); return ret; } void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *adapter, struct ethtool_pauseparam *pause) { struct qlcnic_hardware_context *ahw = adapter->ahw; int status = 0; u32 config; status = qlcnic_83xx_get_port_config(adapter); if (status) { dev_err(&adapter->pdev->dev, "%s: Get Pause Config failed\n", __func__); return; } config = ahw->port_config; if (config & QLC_83XX_CFG_STD_PAUSE) { switch (MSW(config)) { case QLC_83XX_TX_PAUSE: pause->tx_pause = 1; break; case QLC_83XX_RX_PAUSE: pause->rx_pause = 1; break; case QLC_83XX_TX_RX_PAUSE: default: /* Backward compatibility for existing * flash definitions */ pause->tx_pause = 1; pause->rx_pause = 1; } } if (QLC_83XX_AUTONEG(config)) pause->autoneg = 1; } int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter, struct ethtool_pauseparam *pause) { struct qlcnic_hardware_context *ahw = adapter->ahw; int status = 0; u32 config; status = qlcnic_83xx_get_port_config(adapter); if (status) { dev_err(&adapter->pdev->dev, "%s: Get Pause Config failed.\n", __func__); return status; } config = ahw->port_config; if (ahw->port_type == QLCNIC_GBE) { if (pause->autoneg) ahw->port_config |= QLC_83XX_ENABLE_AUTONEG; if (!pause->autoneg) ahw->port_config &= ~QLC_83XX_ENABLE_AUTONEG; } else if ((ahw->port_type == QLCNIC_XGBE) && (pause->autoneg)) { return -EOPNOTSUPP; } if (!(config & QLC_83XX_CFG_STD_PAUSE)) ahw->port_config |= QLC_83XX_CFG_STD_PAUSE; if (pause->rx_pause && pause->tx_pause) { ahw->port_config |= QLC_83XX_CFG_STD_TX_RX_PAUSE; } else if (pause->rx_pause && !pause->tx_pause) { ahw->port_config &= ~QLC_83XX_CFG_STD_TX_PAUSE; ahw->port_config |= QLC_83XX_CFG_STD_RX_PAUSE; } else if (pause->tx_pause && !pause->rx_pause) { ahw->port_config &= ~QLC_83XX_CFG_STD_RX_PAUSE; ahw->port_config |= QLC_83XX_CFG_STD_TX_PAUSE; } else if (!pause->rx_pause && !pause->tx_pause) { ahw->port_config &= ~(QLC_83XX_CFG_STD_TX_RX_PAUSE | QLC_83XX_CFG_STD_PAUSE); } status = qlcnic_83xx_set_port_config(adapter); if (status) { dev_err(&adapter->pdev->dev, "%s: Set Pause Config failed.\n", __func__); ahw->port_config = config; } return status; } static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter) { int ret, err = 0; u32 temp; qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, QLC_83XX_FLASH_OEM_READ_SIG); qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, QLC_83XX_FLASH_READ_CTRL); ret = qlcnic_83xx_poll_flash_status_reg(adapter); if (ret) return -EIO; temp = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err); if (err == -EIO) return err; return temp & 0xFF; } int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter) { int status; status = qlcnic_83xx_read_flash_status_reg(adapter); if (status == -EIO) { dev_info(&adapter->pdev->dev, "%s: EEPROM test failed.\n", __func__); return 1; } return 0; } static int qlcnic_83xx_shutdown(struct pci_dev *pdev) { struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; netif_device_detach(netdev); qlcnic_cancel_idc_work(adapter); if (netif_running(netdev)) qlcnic_down(adapter, netdev); qlcnic_83xx_disable_mbx_intr(adapter); cancel_delayed_work_sync(&adapter->idc_aen_work); return 
pci_save_state(pdev); } static int qlcnic_83xx_resume(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlc_83xx_idc *idc = &ahw->idc; int err = 0; err = qlcnic_83xx_idc_init(adapter); if (err) return err; if (ahw->nic_mode == QLCNIC_VNIC_MODE) { if (ahw->op_mode == QLCNIC_MGMT_FUNC) { qlcnic_83xx_set_vnic_opmode(adapter); } else { err = qlcnic_83xx_check_vnic_state(adapter); if (err) return err; } } err = qlcnic_83xx_idc_reattach_driver(adapter); if (err) return err; qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, idc->delay); return err; } void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx) { reinit_completion(&mbx->completion); set_bit(QLC_83XX_MBX_READY, &mbx->status); } void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx) { if (!mbx) return; destroy_workqueue(mbx->work_q); kfree(mbx); } static inline void qlcnic_83xx_notify_cmd_completion(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED); if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) { qlcnic_free_mbx_args(cmd); kfree(cmd); return; } complete(&cmd->completion); } static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter) { struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; struct list_head *head = &mbx->cmd_q; struct qlcnic_cmd_args *cmd = NULL; spin_lock_bh(&mbx->queue_lock); while (!list_empty(head)) { cmd = list_entry(head->next, struct qlcnic_cmd_args, list); dev_info(&adapter->pdev->dev, "%s: Mailbox command 0x%x\n", __func__, cmd->cmd_op); list_del(&cmd->list); mbx->num_cmds--; qlcnic_83xx_notify_cmd_completion(adapter, cmd); } spin_unlock_bh(&mbx->queue_lock); } static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_mailbox *mbx = ahw->mailbox; u32 host_mbx_ctrl; if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) return -EBUSY; host_mbx_ctrl = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); if (host_mbx_ctrl) { clear_bit(QLC_83XX_MBX_READY, &mbx->status); ahw->idc.collect_dump = 1; return -EIO; } return 0; } static inline void qlcnic_83xx_signal_mbx_cmd(struct qlcnic_adapter *adapter, u8 issue_cmd) { if (issue_cmd) QLCWRX(adapter->ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER); else QLCWRX(adapter->ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER); } static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; spin_lock_bh(&mbx->queue_lock); list_del(&cmd->list); mbx->num_cmds--; spin_unlock_bh(&mbx->queue_lock); qlcnic_83xx_notify_cmd_completion(adapter, cmd); } static void qlcnic_83xx_encode_mbx_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { u32 mbx_cmd, fw_hal_version, hdr_size, total_size, tmp; struct qlcnic_hardware_context *ahw = adapter->ahw; int i, j; if (cmd->op_type != QLC_83XX_MBX_POST_BC_OP) { mbx_cmd = cmd->req.arg[0]; writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0)); for (i = 1; i < cmd->req.num; i++) writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i)); } else { fw_hal_version = ahw->fw_hal_version; hdr_size = sizeof(struct qlcnic_bc_hdr) / sizeof(u32); total_size = cmd->pay_size + hdr_size; tmp = QLCNIC_CMD_BC_EVENT_SETUP | total_size << 16; mbx_cmd = tmp | fw_hal_version << 29; writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0)); /* Back channel specific operations bits */ mbx_cmd = 0x1 | 1 << 4; if (qlcnic_sriov_pf_check(adapter)) mbx_cmd |= cmd->func_num << 5; writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1)); for (i 
= 2, j = 0; j < hdr_size; i++, j++) writel(*(cmd->hdr++), QLCNIC_MBX_HOST(ahw, i)); for (j = 0; j < cmd->pay_size; j++, i++) writel(*(cmd->pay++), QLCNIC_MBX_HOST(ahw, i)); } } void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter) { struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; if (!mbx) return; clear_bit(QLC_83XX_MBX_READY, &mbx->status); complete(&mbx->completion); cancel_work_sync(&mbx->work); flush_workqueue(mbx->work_q); qlcnic_83xx_flush_mbx_queue(adapter); } static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd, unsigned long *timeout) { struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; if (test_bit(QLC_83XX_MBX_READY, &mbx->status)) { atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_WAIT); init_completion(&cmd->completion); cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN; spin_lock_bh(&mbx->queue_lock); list_add_tail(&cmd->list, &mbx->cmd_q); mbx->num_cmds++; cmd->total_cmds = mbx->num_cmds; *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT; queue_work(mbx->work_q, &mbx->work); spin_unlock_bh(&mbx->queue_lock); return 0; } return -EBUSY; } static int qlcnic_83xx_check_mac_rcode(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { u8 mac_cmd_rcode; u32 fw_data; if (cmd->cmd_op == QLCNIC_CMD_CONFIG_MAC_VLAN) { fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2)); mac_cmd_rcode = (u8)fw_data; if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE || mac_cmd_rcode == QLC_83XX_MAC_PRESENT || mac_cmd_rcode == QLC_83XX_MAC_ABSENT) { cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS; return QLCNIC_RCODE_SUCCESS; } } return -EINVAL; } static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct device *dev = &adapter->pdev->dev; u8 mbx_err_code; u32 fw_data; fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); mbx_err_code = QLCNIC_MBX_STATUS(fw_data); qlcnic_83xx_get_mbx_data(adapter, cmd); switch (mbx_err_code) { case QLCNIC_MBX_RSP_OK: case QLCNIC_MBX_PORT_RSP_OK: cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS; break; default: if (!qlcnic_83xx_check_mac_rcode(adapter, cmd)) break; dev_err(dev, "%s: Mailbox command failed, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x, error=0x%x\n", __func__, cmd->cmd_op, cmd->type, ahw->pci_func, ahw->op_mode, mbx_err_code); cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_FAILED; qlcnic_dump_mbx(adapter, cmd); } return; } static inline void qlcnic_dump_mailbox_registers(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; u32 offset; offset = QLCRDX(ahw, QLCNIC_DEF_INT_MASK); dev_info(&adapter->pdev->dev, "Mbx interrupt mask=0x%x, Mbx interrupt enable=0x%x, Host mbx control=0x%x, Fw mbx control=0x%x", readl(ahw->pci_base0 + offset), QLCRDX(ahw, QLCNIC_MBX_INTR_ENBL), QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL), QLCRDX(ahw, QLCNIC_FW_MBX_CTRL)); } static void qlcnic_83xx_mailbox_worker(struct work_struct *work) { struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox, work); struct qlcnic_adapter *adapter = mbx->adapter; const struct qlcnic_mbx_ops *mbx_ops = mbx->ops; struct device *dev = &adapter->pdev->dev; struct list_head *head = &mbx->cmd_q; struct qlcnic_hardware_context *ahw; struct qlcnic_cmd_args *cmd = NULL; unsigned long flags; ahw = adapter->ahw; while (true) { if (qlcnic_83xx_check_mbx_status(adapter)) { qlcnic_83xx_flush_mbx_queue(adapter); return; } spin_lock_irqsave(&mbx->aen_lock, flags); mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT; 
spin_unlock_irqrestore(&mbx->aen_lock, flags); spin_lock_bh(&mbx->queue_lock); if (list_empty(head)) { spin_unlock_bh(&mbx->queue_lock); return; } cmd = list_entry(head->next, struct qlcnic_cmd_args, list); spin_unlock_bh(&mbx->queue_lock); mbx_ops->encode_cmd(adapter, cmd); mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST); if (wait_for_completion_timeout(&mbx->completion, QLC_83XX_MBX_TIMEOUT)) { mbx_ops->decode_resp(adapter, cmd); mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_COMPLETION); } else { dev_err(dev, "%s: Mailbox command timeout, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x\n", __func__, cmd->cmd_op, cmd->type, ahw->pci_func, ahw->op_mode); clear_bit(QLC_83XX_MBX_READY, &mbx->status); qlcnic_dump_mailbox_registers(adapter); qlcnic_83xx_get_mbx_data(adapter, cmd); qlcnic_dump_mbx(adapter, cmd); qlcnic_83xx_idc_request_reset(adapter, QLCNIC_FORCE_FW_DUMP_KEY); cmd->rsp_opcode = QLCNIC_RCODE_TIMEOUT; } mbx_ops->dequeue_cmd(adapter, cmd); } } static const struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = { .enqueue_cmd = qlcnic_83xx_enqueue_mbx_cmd, .dequeue_cmd = qlcnic_83xx_dequeue_mbx_cmd, .decode_resp = qlcnic_83xx_decode_mbx_rsp, .encode_cmd = qlcnic_83xx_encode_mbx_cmd, .nofity_fw = qlcnic_83xx_signal_mbx_cmd, }; int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_mailbox *mbx; ahw->mailbox = kzalloc(sizeof(*mbx), GFP_KERNEL); if (!ahw->mailbox) return -ENOMEM; mbx = ahw->mailbox; mbx->ops = &qlcnic_83xx_mbx_ops; mbx->adapter = adapter; spin_lock_init(&mbx->queue_lock); spin_lock_init(&mbx->aen_lock); INIT_LIST_HEAD(&mbx->cmd_q); init_completion(&mbx->completion); mbx->work_q = create_singlethread_workqueue("qlcnic_mailbox"); if (mbx->work_q == NULL) { kfree(mbx); return -ENOMEM; } INIT_WORK(&mbx->work, qlcnic_83xx_mailbox_worker); set_bit(QLC_83XX_MBX_READY, &mbx->status); return 0; } static pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; if (state == pci_channel_io_normal) return PCI_ERS_RESULT_RECOVERED; set_bit(__QLCNIC_AER, &adapter->state); set_bit(__QLCNIC_RESETTING, &adapter->state); qlcnic_83xx_aer_stop_poll_work(adapter); pci_save_state(pdev); pci_disable_device(pdev); return PCI_ERS_RESULT_NEED_RESET; } static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev) { struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); int err = 0; pdev->error_state = pci_channel_io_normal; err = pci_enable_device(pdev); if (err) goto disconnect; pci_set_power_state(pdev, PCI_D0); pci_set_master(pdev); pci_restore_state(pdev); err = qlcnic_83xx_aer_reset(adapter); if (err == 0) return PCI_ERS_RESULT_RECOVERED; disconnect: clear_bit(__QLCNIC_AER, &adapter->state); clear_bit(__QLCNIC_RESETTING, &adapter->state); return PCI_ERS_RESULT_DISCONNECT; } static void qlcnic_83xx_io_resume(struct pci_dev *pdev) { struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); if (test_and_clear_bit(__QLCNIC_AER, &adapter->state)) qlcnic_83xx_aer_start_poll_work(adapter); }
linux-master
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
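Editorial note (not part of the dataset record above or below): the mailbox path in qlcnic_83xx_hw.c is a single-consumer command queue. qlcnic_83xx_enqueue_mbx_cmd() appends a command under queue_lock and kicks a dedicated workqueue, while qlcnic_83xx_mailbox_worker() drains the list, writes the request registers, and either decodes the firmware response or times out and requests a reset. The standalone C sketch below mirrors only that enqueue/worker/completion handshake in userspace (a mutex plus condition variable standing in for the spinlock, workqueue and completion object); every demo_* name is hypothetical and nothing here is driver source.

/* Editorial sketch only -- build with: cc demo_mbx.c -lpthread */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_cmd {
	int opcode;                 /* stands in for cmd->req.arg[0] */
	bool done;                  /* stands in for the per-command completion */
	struct demo_cmd *next;
};

struct demo_mbx {
	pthread_mutex_t lock;       /* plays the role of mbx->queue_lock */
	pthread_cond_t wake;        /* wakes the worker and completion waiters */
	struct demo_cmd *head, *tail;
	bool stop;
};

/* Analogue of qlcnic_83xx_enqueue_mbx_cmd(): append under the lock, kick worker. */
static void demo_enqueue(struct demo_mbx *mbx, struct demo_cmd *cmd)
{
	pthread_mutex_lock(&mbx->lock);
	cmd->next = NULL;
	if (mbx->tail)
		mbx->tail->next = cmd;
	else
		mbx->head = cmd;
	mbx->tail = cmd;
	pthread_cond_broadcast(&mbx->wake);
	pthread_mutex_unlock(&mbx->lock);
}

/* Analogue of qlcnic_83xx_mailbox_worker(): drain the queue one command at a time. */
static void *demo_worker(void *arg)
{
	struct demo_mbx *mbx = arg;

	pthread_mutex_lock(&mbx->lock);
	while (!mbx->stop) {
		struct demo_cmd *cmd = mbx->head;

		if (!cmd) {
			pthread_cond_wait(&mbx->wake, &mbx->lock);
			continue;
		}
		mbx->head = cmd->next;
		if (!mbx->head)
			mbx->tail = NULL;
		pthread_mutex_unlock(&mbx->lock);

		/* stands in for encoding the request, signalling the firmware
		 * and decoding the response in the real worker
		 */
		printf("worker: handled opcode 0x%x\n", cmd->opcode);

		pthread_mutex_lock(&mbx->lock);
		cmd->done = true;
		pthread_cond_broadcast(&mbx->wake);
	}
	pthread_mutex_unlock(&mbx->lock);
	return NULL;
}

int main(void)
{
	struct demo_mbx mbx;
	struct demo_cmd cmd = { .opcode = 0x45 };
	pthread_t tid;

	mbx.head = mbx.tail = NULL;
	mbx.stop = false;
	pthread_mutex_init(&mbx.lock, NULL);
	pthread_cond_init(&mbx.wake, NULL);

	pthread_create(&tid, NULL, demo_worker, &mbx);
	demo_enqueue(&mbx, &cmd);

	pthread_mutex_lock(&mbx.lock);
	while (!cmd.done)           /* the driver uses wait_for_completion_timeout() */
		pthread_cond_wait(&mbx.wake, &mbx.lock);
	mbx.stop = true;
	pthread_cond_broadcast(&mbx.wake);
	pthread_mutex_unlock(&mbx.lock);

	pthread_join(tid, NULL);
	return 0;
}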
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic qlcnic NIC Driver * Copyright (c) 2009-2013 QLogic Corporation */ #include <linux/types.h> #include "qlcnic_sriov.h" #include "qlcnic.h" #include "qlcnic_83xx_hw.h" #define QLC_BC_COMMAND 0 #define QLC_BC_RESPONSE 1 #define QLC_MBOX_RESP_TIMEOUT (10 * HZ) #define QLC_MBOX_CH_FREE_TIMEOUT (10 * HZ) #define QLC_BC_MSG 0 #define QLC_BC_CFREE 1 #define QLC_BC_FLR 2 #define QLC_BC_HDR_SZ 16 #define QLC_BC_PAYLOAD_SZ (1024 - QLC_BC_HDR_SZ) #define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF 2048 #define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF 512 #define QLC_83XX_VF_RESET_FAIL_THRESH 8 #define QLC_BC_CMD_MAX_RETRY_CNT 5 static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work); static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *); static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32); static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *); static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *); static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *); static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *); static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8); static void qlcnic_sriov_process_bc_cmd(struct work_struct *); static int qlcnic_sriov_vf_shutdown(struct pci_dev *); static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *); static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *); static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = { .read_crb = qlcnic_83xx_read_crb, .write_crb = qlcnic_83xx_write_crb, .read_reg = qlcnic_83xx_rd_reg_indirect, .write_reg = qlcnic_83xx_wrt_reg_indirect, .get_mac_address = qlcnic_83xx_get_mac_address, .setup_intr = qlcnic_83xx_setup_intr, .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args, .mbx_cmd = qlcnic_sriov_issue_cmd, .get_func_no = qlcnic_83xx_get_func_no, .api_lock = qlcnic_83xx_cam_lock, .api_unlock = qlcnic_83xx_cam_unlock, .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag, .create_rx_ctx = qlcnic_83xx_create_rx_ctx, .create_tx_ctx = qlcnic_83xx_create_tx_ctx, .del_rx_ctx = qlcnic_83xx_del_rx_ctx, .del_tx_ctx = qlcnic_83xx_del_tx_ctx, .setup_link_event = qlcnic_83xx_setup_link_event, .get_nic_info = qlcnic_83xx_get_nic_info, .get_pci_info = qlcnic_83xx_get_pci_info, .set_nic_info = qlcnic_83xx_set_nic_info, .change_macvlan = qlcnic_83xx_sre_macaddr_change, .napi_enable = qlcnic_83xx_napi_enable, .napi_disable = qlcnic_83xx_napi_disable, .config_intr_coal = qlcnic_83xx_config_intr_coal, .config_rss = qlcnic_83xx_config_rss, .config_hw_lro = qlcnic_83xx_config_hw_lro, .config_promisc_mode = qlcnic_83xx_nic_set_promisc, .change_l2_filter = qlcnic_83xx_change_l2_filter, .get_board_info = qlcnic_83xx_get_port_info, .free_mac_list = qlcnic_sriov_vf_free_mac_list, .enable_sds_intr = qlcnic_83xx_enable_sds_intr, .disable_sds_intr = qlcnic_83xx_disable_sds_intr, .encap_rx_offload = qlcnic_83xx_encap_rx_offload, .encap_tx_offload = qlcnic_83xx_encap_tx_offload, }; static struct qlcnic_nic_template qlcnic_sriov_vf_ops = { .config_bridged_mode = qlcnic_config_bridged_mode, .config_led = qlcnic_config_led, .cancel_idc_work = qlcnic_sriov_vf_cancel_fw_work, .napi_add = qlcnic_83xx_napi_add, .napi_del = qlcnic_83xx_napi_del, .shutdown = qlcnic_sriov_vf_shutdown, .resume = qlcnic_sriov_vf_resume, .config_ipaddr = qlcnic_83xx_config_ipaddr, .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr, }; static const struct qlcnic_mailbox_metadata 
qlcnic_sriov_bc_mbx_tbl[] = { {QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2}, {QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2}, {QLCNIC_BC_CMD_GET_ACL, 3, 14}, {QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2}, }; static inline bool qlcnic_sriov_bc_msg_check(u32 val) { return (val & (1 << QLC_BC_MSG)) ? true : false; } static inline bool qlcnic_sriov_channel_free_check(u32 val) { return (val & (1 << QLC_BC_CFREE)) ? true : false; } static inline bool qlcnic_sriov_flr_check(u32 val) { return (val & (1 << QLC_BC_FLR)) ? true : false; } static inline u8 qlcnic_sriov_target_func_id(u32 val) { return (val >> 4) & 0xff; } static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id) { struct pci_dev *dev = adapter->pdev; int pos; u16 stride, offset; if (qlcnic_sriov_vf_check(adapter)) return 0; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); if (!pos) return 0; pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset); pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride); return (dev->devfn + offset + stride * vf_id) & 0xff; } int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) { struct qlcnic_sriov *sriov; struct qlcnic_back_channel *bc; struct workqueue_struct *wq; struct qlcnic_vport *vp; struct qlcnic_vf_info *vf; int err, i; if (!qlcnic_sriov_enable_check(adapter)) return -EIO; sriov = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL); if (!sriov) return -ENOMEM; adapter->ahw->sriov = sriov; sriov->num_vfs = num_vfs; bc = &sriov->bc; sriov->vf_info = kcalloc(num_vfs, sizeof(struct qlcnic_vf_info), GFP_KERNEL); if (!sriov->vf_info) { err = -ENOMEM; goto qlcnic_free_sriov; } wq = create_singlethread_workqueue("bc-trans"); if (wq == NULL) { err = -ENOMEM; dev_err(&adapter->pdev->dev, "Cannot create bc-trans workqueue\n"); goto qlcnic_free_vf_info; } bc->bc_trans_wq = wq; wq = create_singlethread_workqueue("async"); if (wq == NULL) { err = -ENOMEM; dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n"); goto qlcnic_destroy_trans_wq; } bc->bc_async_wq = wq; INIT_LIST_HEAD(&bc->async_cmd_list); INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd); spin_lock_init(&bc->queue_lock); bc->adapter = adapter; for (i = 0; i < num_vfs; i++) { vf = &sriov->vf_info[i]; vf->adapter = adapter; vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i); mutex_init(&vf->send_cmd_lock); spin_lock_init(&vf->vlan_list_lock); INIT_LIST_HEAD(&vf->rcv_act.wait_list); INIT_LIST_HEAD(&vf->rcv_pend.wait_list); spin_lock_init(&vf->rcv_act.lock); spin_lock_init(&vf->rcv_pend.lock); init_completion(&vf->ch_free_cmpl); INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd); if (qlcnic_sriov_pf_check(adapter)) { vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL); if (!vp) { err = -ENOMEM; goto qlcnic_destroy_async_wq; } sriov->vf_info[i].vp = vp; vp->vlan_mode = QLC_GUEST_VLAN_MODE; vp->max_tx_bw = MAX_BW; vp->min_tx_bw = MIN_BW; vp->spoofchk = false; eth_random_addr(vp->mac); dev_info(&adapter->pdev->dev, "MAC Address %pM is configured for VF %d\n", vp->mac, i); } } return 0; qlcnic_destroy_async_wq: while (i--) kfree(sriov->vf_info[i].vp); destroy_workqueue(bc->bc_async_wq); qlcnic_destroy_trans_wq: destroy_workqueue(bc->bc_trans_wq); qlcnic_free_vf_info: kfree(sriov->vf_info); qlcnic_free_sriov: kfree(adapter->ahw->sriov); return err; } void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list) { struct qlcnic_bc_trans *trans; struct qlcnic_cmd_args cmd; unsigned long flags; spin_lock_irqsave(&t_list->lock, flags); while (!list_empty(&t_list->wait_list)) { trans = 
list_first_entry(&t_list->wait_list, struct qlcnic_bc_trans, list); list_del(&trans->list); t_list->count--; cmd.req.arg = (u32 *)trans->req_pay; cmd.rsp.arg = (u32 *)trans->rsp_pay; qlcnic_free_mbx_args(&cmd); qlcnic_sriov_cleanup_transaction(trans); } spin_unlock_irqrestore(&t_list->lock, flags); } void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_back_channel *bc = &sriov->bc; struct qlcnic_vf_info *vf; int i; if (!qlcnic_sriov_enable_check(adapter)) return; qlcnic_sriov_cleanup_async_list(bc); destroy_workqueue(bc->bc_async_wq); for (i = 0; i < sriov->num_vfs; i++) { vf = &sriov->vf_info[i]; qlcnic_sriov_cleanup_list(&vf->rcv_pend); cancel_work_sync(&vf->trans_work); qlcnic_sriov_cleanup_list(&vf->rcv_act); } destroy_workqueue(bc->bc_trans_wq); for (i = 0; i < sriov->num_vfs; i++) kfree(sriov->vf_info[i].vp); kfree(sriov->vf_info); kfree(adapter->ahw->sriov); } static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter) { qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM); qlcnic_sriov_cfg_bc_intr(adapter, 0); __qlcnic_sriov_cleanup(adapter); } void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter) { if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state)) return; qlcnic_sriov_free_vlans(adapter); if (qlcnic_sriov_pf_check(adapter)) qlcnic_sriov_pf_cleanup(adapter); if (qlcnic_sriov_vf_check(adapter)) qlcnic_sriov_vf_cleanup(adapter); } static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, u32 *pay, u8 pci_func, u8 size) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_mailbox *mbx = ahw->mailbox; struct qlcnic_cmd_args cmd; unsigned long timeout; int err; memset(&cmd, 0, sizeof(struct qlcnic_cmd_args)); cmd.hdr = hdr; cmd.pay = pay; cmd.pay_size = size; cmd.func_num = pci_func; cmd.op_type = QLC_83XX_MBX_POST_BC_OP; cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op; err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout); if (err) { dev_err(&adapter->pdev->dev, "%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", __func__, cmd.cmd_op, cmd.type, ahw->pci_func, ahw->op_mode); return err; } if (!wait_for_completion_timeout(&cmd.completion, timeout)) { dev_err(&adapter->pdev->dev, "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", __func__, cmd.cmd_op, cmd.type, ahw->pci_func, ahw->op_mode); flush_workqueue(mbx->work_q); } return cmd.rsp_opcode; } static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter) { adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF; adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G; adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF; adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; adapter->num_txd = MAX_CMD_DESCRIPTORS; adapter->max_rds_rings = MAX_RDS_RINGS; } int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter, struct qlcnic_info *npar_info, u16 vport_id) { struct device *dev = &adapter->pdev->dev; struct qlcnic_cmd_args cmd; int err; u32 status; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO); if (err) return err; cmd.req.arg[1] = vport_id << 16 | 0x1; err = qlcnic_issue_cmd(adapter, &cmd); if (err) { dev_err(&adapter->pdev->dev, "Failed to get vport info, err=%d\n", err); qlcnic_free_mbx_args(&cmd); return err; } status = cmd.rsp.arg[2] & 0xffff; if (status & BIT_0) npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]); if (status & BIT_1) npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]); if 
(status & BIT_2) npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]); if (status & BIT_3) npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]); if (status & BIT_4) npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]); if (status & BIT_5) npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]); if (status & BIT_6) npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]); if (status & BIT_7) npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]); if (status & BIT_8) npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]); if (status & BIT_9) npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]); npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]); npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]); npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]); npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]); dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n" "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n" "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n" "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n" "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n" "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n", npar_info->min_tx_bw, npar_info->max_tx_bw, npar_info->max_tx_ques, npar_info->max_tx_mac_filters, npar_info->max_rx_mcast_mac_filters, npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr, npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings, npar_info->max_rx_buf_rings, npar_info->max_rx_ques, npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs, npar_info->max_remote_ipv6_addrs); qlcnic_free_mbx_args(&cmd); return err; } static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff; adapter->flags &= ~QLCNIC_TAGGING_ENABLED; return 0; } static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; int i, num_vlans, ret; u16 *vlans; if (sriov->allowed_vlans) return 0; sriov->any_vlan = cmd->rsp.arg[2] & 0xf; sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16; dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n", sriov->num_allowed_vlans); ret = qlcnic_sriov_alloc_vlans(adapter); if (ret) return ret; if (!sriov->any_vlan) return 0; num_vlans = sriov->num_allowed_vlans; sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL); if (!sriov->allowed_vlans) return -ENOMEM; vlans = (u16 *)&cmd->rsp.arg[3]; for (i = 0; i < num_vlans; i++) sriov->allowed_vlans[i] = vlans[i]; return 0; } static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_cmd_args cmd; int ret = 0; memset(&cmd, 0, sizeof(cmd)); ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL); if (ret) return ret; ret = qlcnic_issue_cmd(adapter, &cmd); if (ret) { dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n", ret); } else { sriov->vlan_mode = cmd.rsp.arg[1] & 0x3; switch (sriov->vlan_mode) { case QLC_GUEST_VLAN_MODE: ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd); break; case QLC_PVID_MODE: ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd); break; } } qlcnic_free_mbx_args(&cmd); return ret; } static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_info nic_info; int err; err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0); if (err) return err; ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters; err = 
qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func); if (err) return -EIO; if (qlcnic_83xx_get_port_info(adapter)) return -EIO; qlcnic_sriov_vf_cfg_buff_desc(adapter); adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; dev_info(&adapter->pdev->dev, "HAL Version: %d\n", adapter->ahw->fw_hal_version); ahw->physical_port = (u8) nic_info.phys_port; ahw->switch_mode = nic_info.switch_mode; ahw->max_mtu = nic_info.max_mtu; ahw->op_mode = nic_info.op_mode; ahw->capabilities = nic_info.capabilities; return 0; } static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter) { int err; adapter->flags |= QLCNIC_VLAN_FILTERING; adapter->ahw->total_nic_func = 1; INIT_LIST_HEAD(&adapter->vf_mc_list); if (!qlcnic_use_msi_x && !!qlcnic_use_msi) dev_warn(&adapter->pdev->dev, "Device does not support MSI interrupts\n"); /* compute and set default and max tx/sds rings */ qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING); qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING); err = qlcnic_setup_intr(adapter); if (err) { dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n"); goto err_out_disable_msi; } err = qlcnic_83xx_setup_mbx_intr(adapter); if (err) goto err_out_disable_msi; err = qlcnic_sriov_init(adapter, 1); if (err) goto err_out_disable_mbx_intr; err = qlcnic_sriov_cfg_bc_intr(adapter, 1); if (err) goto err_out_cleanup_sriov; err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT); if (err) goto err_out_disable_bc_intr; err = qlcnic_sriov_vf_init_driver(adapter); if (err) goto err_out_send_channel_term; err = qlcnic_sriov_get_vf_acl(adapter); if (err) goto err_out_send_channel_term; err = qlcnic_setup_netdev(adapter, adapter->netdev); if (err) goto err_out_send_channel_term; pci_set_drvdata(adapter->pdev, adapter); dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", adapter->netdev->name); qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state, adapter->ahw->idc.delay); return 0; err_out_send_channel_term: qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM); err_out_disable_bc_intr: qlcnic_sriov_cfg_bc_intr(adapter, 0); err_out_cleanup_sriov: __qlcnic_sriov_cleanup(adapter); err_out_disable_mbx_intr: qlcnic_83xx_free_mbx_intr(adapter); err_out_disable_msi: qlcnic_teardown_intr(adapter); return err; } static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter) { u32 state; do { msleep(20); if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT) return -EIO; state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE); } while (state != QLC_83XX_IDC_DEV_READY); return 0; } int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; int err; set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status); ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; ahw->reset_context = 0; adapter->fw_fail_cnt = 0; ahw->msix_supported = 1; adapter->need_fw_reset = 0; adapter->flags |= QLCNIC_TX_INTR_SHARED; err = qlcnic_sriov_check_dev_ready(adapter); if (err) return err; err = qlcnic_sriov_setup_vf(adapter); if (err) return err; if (qlcnic_read_mac_addr(adapter)) dev_warn(&adapter->pdev->dev, "failed to read mac addr\n"); INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work); clear_bit(__QLCNIC_RESETTING, &adapter->state); return 0; } void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; ahw->op_mode = QLCNIC_SRIOV_VF_FUNC; dev_info(&adapter->pdev->dev, "HAL Version: %d Non Privileged SRIOV function\n", ahw->fw_hal_version); adapter->nic_ops = 
&qlcnic_sriov_vf_ops; set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state); return; } void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw) { ahw->hw_ops = &qlcnic_sriov_vf_hw_ops; ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl; ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl; } static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag) { u32 pay_size; pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ); if (pay_size) pay_size = QLC_BC_PAYLOAD_SZ; else pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ; return pay_size; } int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func) { struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info; u8 i; if (qlcnic_sriov_vf_check(adapter)) return 0; for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) { if (vf_info[i].pci_func == pci_func) return i; } return -EINVAL; } static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans) { *trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC); if (!*trans) return -ENOMEM; init_completion(&(*trans)->resp_cmpl); return 0; } static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr, u32 size) { *hdr = kcalloc(size, sizeof(struct qlcnic_bc_hdr), GFP_ATOMIC); if (!*hdr) return -ENOMEM; return 0; } static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type) { const struct qlcnic_mailbox_metadata *mbx_tbl; int i, size; mbx_tbl = qlcnic_sriov_bc_mbx_tbl; size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl); for (i = 0; i < size; i++) { if (type == mbx_tbl[i].cmd) { mbx->op_type = QLC_BC_CMD; mbx->req.num = mbx_tbl[i].in_args; mbx->rsp.num = mbx_tbl[i].out_args; mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32), GFP_ATOMIC); if (!mbx->req.arg) return -ENOMEM; mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32), GFP_ATOMIC); if (!mbx->rsp.arg) { kfree(mbx->req.arg); mbx->req.arg = NULL; return -ENOMEM; } mbx->req.arg[0] = (type | (mbx->req.num << 16) | (3 << 29)); mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16; return 0; } } return -EINVAL; } static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd, u16 seq, u8 msg_type) { struct qlcnic_bc_hdr *hdr; int i; u32 num_regs, bc_pay_sz; u16 remainder; u8 cmd_op, num_frags, t_num_frags; bc_pay_sz = QLC_BC_PAYLOAD_SZ; if (msg_type == QLC_BC_COMMAND) { trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg; trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg; num_regs = cmd->req.num; trans->req_pay_size = (num_regs * 4); num_regs = cmd->rsp.num; trans->rsp_pay_size = (num_regs * 4); cmd_op = cmd->req.arg[0] & 0xff; remainder = (trans->req_pay_size) % (bc_pay_sz); num_frags = (trans->req_pay_size) / (bc_pay_sz); if (remainder) num_frags++; t_num_frags = num_frags; if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags)) return -ENOMEM; remainder = (trans->rsp_pay_size) % (bc_pay_sz); num_frags = (trans->rsp_pay_size) / (bc_pay_sz); if (remainder) num_frags++; if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags)) return -ENOMEM; num_frags = t_num_frags; hdr = trans->req_hdr; } else { cmd->req.arg = (u32 *)trans->req_pay; cmd->rsp.arg = (u32 *)trans->rsp_pay; cmd_op = cmd->req.arg[0] & 0xff; cmd->cmd_op = cmd_op; remainder = (trans->rsp_pay_size) % (bc_pay_sz); num_frags = (trans->rsp_pay_size) / (bc_pay_sz); if (remainder) num_frags++; cmd->req.num = trans->req_pay_size / 4; cmd->rsp.num = trans->rsp_pay_size / 4; hdr = trans->rsp_hdr; cmd->op_type = trans->req_hdr->op_type; } trans->trans_id = seq; trans->cmd_id = cmd_op; 
for (i = 0; i < num_frags; i++) { hdr[i].version = 2; hdr[i].msg_type = msg_type; hdr[i].op_type = cmd->op_type; hdr[i].num_cmds = 1; hdr[i].num_frags = num_frags; hdr[i].frag_num = i + 1; hdr[i].cmd_op = cmd_op; hdr[i].seq_id = seq; } return 0; } static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans) { if (!trans) return; kfree(trans->req_hdr); kfree(trans->rsp_hdr); kfree(trans); } static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf, struct qlcnic_bc_trans *trans, u8 type) { struct qlcnic_trans_list *t_list; unsigned long flags; int ret = 0; if (type == QLC_BC_RESPONSE) { t_list = &vf->rcv_act; spin_lock_irqsave(&t_list->lock, flags); t_list->count--; list_del(&trans->list); if (t_list->count > 0) ret = 1; spin_unlock_irqrestore(&t_list->lock, flags); } if (type == QLC_BC_COMMAND) { while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state)) msleep(100); vf->send_cmd = NULL; clear_bit(QLC_BC_VF_SEND, &vf->state); } return ret; } static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov, struct qlcnic_vf_info *vf, work_func_t func) { if (test_bit(QLC_BC_VF_FLR, &vf->state) || vf->adapter->need_fw_reset) return; queue_work(sriov->bc.bc_trans_wq, &vf->trans_work); } static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans) { struct completion *cmpl = &trans->resp_cmpl; if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT)) trans->trans_state = QLC_END; else trans->trans_state = QLC_ABORT; return; } static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans, u8 type) { if (type == QLC_BC_RESPONSE) { trans->curr_rsp_frag++; if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags) trans->trans_state = QLC_INIT; else trans->trans_state = QLC_END; } else { trans->curr_req_frag++; if (trans->curr_req_frag < trans->req_hdr->num_frags) trans->trans_state = QLC_INIT; else trans->trans_state = QLC_WAIT_FOR_RESP; } } static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans, u8 type) { struct qlcnic_vf_info *vf = trans->vf; struct completion *cmpl = &vf->ch_free_cmpl; if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) { trans->trans_state = QLC_ABORT; return; } clear_bit(QLC_BC_VF_CHANNEL, &vf->state); qlcnic_sriov_handle_multi_frags(trans, type); } static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, u32 *pay, u32 size) { struct qlcnic_hardware_context *ahw = adapter->ahw; u8 i, max = 2, hdr_size, j; hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); max = (size / sizeof(u32)) + hdr_size; for (i = 2, j = 0; j < hdr_size; i++, j++) *(hdr++) = readl(QLCNIC_MBX_FW(ahw, i)); for (; j < max; i++, j++) *(pay++) = readl(QLCNIC_MBX_FW(ahw, i)); } static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf) { int ret = -EBUSY; u32 timeout = 10000; do { if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) { ret = 0; break; } mdelay(1); } while (--timeout); return ret; } static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type) { struct qlcnic_vf_info *vf = trans->vf; u32 pay_size; u32 *hdr, *pay; int ret; u8 pci_func = trans->func_id; if (__qlcnic_sriov_issue_bc_post(vf)) return -EBUSY; if (type == QLC_BC_COMMAND) { hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag); pay = (u32 *)(trans->req_pay + trans->curr_req_frag); pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size, trans->curr_req_frag); pay_size = (pay_size / sizeof(u32)); } else { hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag); pay = (u32 *)(trans->rsp_pay + 
trans->curr_rsp_frag); pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size, trans->curr_rsp_frag); pay_size = (pay_size / sizeof(u32)); } ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay, pci_func, pay_size); return ret; } static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans, struct qlcnic_vf_info *vf, u8 type) { bool flag = true; int err = -EIO; while (flag) { if (test_bit(QLC_BC_VF_FLR, &vf->state) || vf->adapter->need_fw_reset) trans->trans_state = QLC_ABORT; switch (trans->trans_state) { case QLC_INIT: trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE; if (qlcnic_sriov_issue_bc_post(trans, type)) trans->trans_state = QLC_ABORT; break; case QLC_WAIT_FOR_CHANNEL_FREE: qlcnic_sriov_wait_for_channel_free(trans, type); break; case QLC_WAIT_FOR_RESP: qlcnic_sriov_wait_for_resp(trans); break; case QLC_END: err = 0; flag = false; break; case QLC_ABORT: err = -EIO; flag = false; clear_bit(QLC_BC_VF_CHANNEL, &vf->state); break; default: err = -EIO; flag = false; } } return err; } static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter, struct qlcnic_bc_trans *trans, int pci_func) { struct qlcnic_vf_info *vf; int err, index = qlcnic_sriov_func_to_index(adapter, pci_func); if (index < 0) return -EIO; vf = &adapter->ahw->sriov->vf_info[index]; trans->vf = vf; trans->func_id = pci_func; if (!test_bit(QLC_BC_VF_STATE, &vf->state)) { if (qlcnic_sriov_pf_check(adapter)) return -EIO; if (qlcnic_sriov_vf_check(adapter) && trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT) return -EIO; } mutex_lock(&vf->send_cmd_lock); vf->send_cmd = trans; err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND); qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND); mutex_unlock(&vf->send_cmd_lock); return err; } static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter, struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { #ifdef CONFIG_QLCNIC_SRIOV if (qlcnic_sriov_pf_check(adapter)) { qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd); return; } #endif cmd->rsp.arg[0] |= (0x9 << 25); return; } static void qlcnic_sriov_process_bc_cmd(struct work_struct *work) { struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info, trans_work); struct qlcnic_bc_trans *trans = NULL; struct qlcnic_adapter *adapter = vf->adapter; struct qlcnic_cmd_args cmd; u8 req; if (adapter->need_fw_reset) return; if (test_bit(QLC_BC_VF_FLR, &vf->state)) return; memset(&cmd, 0, sizeof(struct qlcnic_cmd_args)); trans = list_first_entry(&vf->rcv_act.wait_list, struct qlcnic_bc_trans, list); adapter = vf->adapter; if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id, QLC_BC_RESPONSE)) goto cleanup_trans; __qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd); trans->trans_state = QLC_INIT; __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE); cleanup_trans: qlcnic_free_mbx_args(&cmd); req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE); qlcnic_sriov_cleanup_transaction(trans); if (req) qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf, qlcnic_sriov_process_bc_cmd); } static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr, struct qlcnic_vf_info *vf) { struct qlcnic_bc_trans *trans; u32 pay_size; if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state)) return; trans = vf->send_cmd; if (trans == NULL) goto clear_send; if (trans->trans_id != hdr->seq_id) goto clear_send; pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size, trans->curr_rsp_frag); qlcnic_sriov_pull_bc_msg(vf->adapter, (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag), (u32 *)(trans->rsp_pay + 
trans->curr_rsp_frag), pay_size); if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags) goto clear_send; complete(&trans->resp_cmpl); clear_send: clear_bit(QLC_BC_VF_SEND, &vf->state); } int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov, struct qlcnic_vf_info *vf, struct qlcnic_bc_trans *trans) { struct qlcnic_trans_list *t_list = &vf->rcv_act; t_list->count++; list_add_tail(&trans->list, &t_list->wait_list); if (t_list->count == 1) qlcnic_sriov_schedule_bc_cmd(sriov, vf, qlcnic_sriov_process_bc_cmd); return 0; } static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov, struct qlcnic_vf_info *vf, struct qlcnic_bc_trans *trans) { struct qlcnic_trans_list *t_list = &vf->rcv_act; spin_lock(&t_list->lock); __qlcnic_sriov_add_act_list(sriov, vf, trans); spin_unlock(&t_list->lock); return 0; } static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov, struct qlcnic_vf_info *vf, struct qlcnic_bc_hdr *hdr) { struct qlcnic_bc_trans *trans = NULL; struct list_head *node; u32 pay_size, curr_frag; u8 found = 0, active = 0; spin_lock(&vf->rcv_pend.lock); if (vf->rcv_pend.count > 0) { list_for_each(node, &vf->rcv_pend.wait_list) { trans = list_entry(node, struct qlcnic_bc_trans, list); if (trans->trans_id == hdr->seq_id) { found = 1; break; } } } if (found) { curr_frag = trans->curr_req_frag; pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size, curr_frag); qlcnic_sriov_pull_bc_msg(vf->adapter, (u32 *)(trans->req_hdr + curr_frag), (u32 *)(trans->req_pay + curr_frag), pay_size); trans->curr_req_frag++; if (trans->curr_req_frag >= hdr->num_frags) { vf->rcv_pend.count--; list_del(&trans->list); active = 1; } } spin_unlock(&vf->rcv_pend.lock); if (active) if (qlcnic_sriov_add_act_list(sriov, vf, trans)) qlcnic_sriov_cleanup_transaction(trans); return; } static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov, struct qlcnic_bc_hdr *hdr, struct qlcnic_vf_info *vf) { struct qlcnic_bc_trans *trans; struct qlcnic_adapter *adapter = vf->adapter; struct qlcnic_cmd_args cmd; u32 pay_size; int err; u8 cmd_op; if (adapter->need_fw_reset) return; if (!test_bit(QLC_BC_VF_STATE, &vf->state) && hdr->op_type != QLC_BC_CMD && hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT) return; if (hdr->frag_num > 1) { qlcnic_sriov_handle_pending_trans(sriov, vf, hdr); return; } memset(&cmd, 0, sizeof(struct qlcnic_cmd_args)); cmd_op = hdr->cmd_op; if (qlcnic_sriov_alloc_bc_trans(&trans)) return; if (hdr->op_type == QLC_BC_CMD) err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op); else err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op); if (err) { qlcnic_sriov_cleanup_transaction(trans); return; } cmd.op_type = hdr->op_type; if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id, QLC_BC_COMMAND)) { qlcnic_free_mbx_args(&cmd); qlcnic_sriov_cleanup_transaction(trans); return; } pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size, trans->curr_req_frag); qlcnic_sriov_pull_bc_msg(vf->adapter, (u32 *)(trans->req_hdr + trans->curr_req_frag), (u32 *)(trans->req_pay + trans->curr_req_frag), pay_size); trans->func_id = vf->pci_func; trans->vf = vf; trans->trans_id = hdr->seq_id; trans->curr_req_frag++; if (qlcnic_sriov_soft_flr_check(adapter, trans, vf)) return; if (trans->curr_req_frag == trans->req_hdr->num_frags) { if (qlcnic_sriov_add_act_list(sriov, vf, trans)) { qlcnic_free_mbx_args(&cmd); qlcnic_sriov_cleanup_transaction(trans); } } else { spin_lock(&vf->rcv_pend.lock); list_add_tail(&trans->list, &vf->rcv_pend.wait_list); vf->rcv_pend.count++; spin_unlock(&vf->rcv_pend.lock); } } static 
void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov, struct qlcnic_vf_info *vf) { struct qlcnic_bc_hdr hdr; u32 *ptr = (u32 *)&hdr; u8 msg_type, i; for (i = 2; i < 6; i++) ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i)); msg_type = hdr.msg_type; switch (msg_type) { case QLC_BC_COMMAND: qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf); break; case QLC_BC_RESPONSE: qlcnic_sriov_handle_bc_resp(&hdr, vf); break; } } static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov, struct qlcnic_vf_info *vf) { struct qlcnic_adapter *adapter = vf->adapter; if (qlcnic_sriov_pf_check(adapter)) qlcnic_sriov_pf_handle_flr(sriov, vf); else dev_err(&adapter->pdev->dev, "Invalid event to VF. VF should not get FLR event\n"); } void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event) { struct qlcnic_vf_info *vf; struct qlcnic_sriov *sriov; int index; u8 pci_func; sriov = adapter->ahw->sriov; pci_func = qlcnic_sriov_target_func_id(event); index = qlcnic_sriov_func_to_index(adapter, pci_func); if (index < 0) return; vf = &sriov->vf_info[index]; vf->pci_func = pci_func; if (qlcnic_sriov_channel_free_check(event)) complete(&vf->ch_free_cmpl); if (qlcnic_sriov_flr_check(event)) { qlcnic_sriov_handle_flr_event(sriov, vf); return; } if (qlcnic_sriov_bc_msg_check(event)) qlcnic_sriov_handle_msg_event(sriov, vf); } int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable) { struct qlcnic_cmd_args cmd; int err; if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state)) return 0; if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP)) return -ENOMEM; if (enable) cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7); err = qlcnic_83xx_issue_cmd(adapter, &cmd); if (err != QLCNIC_RCODE_SUCCESS) { dev_err(&adapter->pdev->dev, "Failed to %s bc events, err=%d\n", (enable ? "enable" : "disable"), err); } qlcnic_free_mbx_args(&cmd); return err; } static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter, struct qlcnic_bc_trans *trans) { u8 max = QLC_BC_CMD_MAX_RETRY_CNT; u32 state; state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE); if (state == QLC_83XX_IDC_DEV_READY) { msleep(20); clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state); trans->trans_state = QLC_INIT; if (++adapter->fw_fail_cnt > max) return -EIO; else return 0; } return -EIO; } static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_mailbox *mbx = ahw->mailbox; struct device *dev = &adapter->pdev->dev; struct qlcnic_bc_trans *trans; int err; u32 rsp_data, opcode, mbx_err_code, rsp; u16 seq = ++adapter->ahw->sriov->bc.trans_counter; u8 func = ahw->pci_func; rsp = qlcnic_sriov_alloc_bc_trans(&trans); if (rsp) goto free_cmd; rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND); if (rsp) goto cleanup_transaction; retry: if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) { rsp = -EIO; QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n", QLCNIC_MBX_RSP(cmd->req.arg[0]), func); goto err_out; } err = qlcnic_sriov_send_bc_cmd(adapter, trans, func); if (err) { dev_err(dev, "MBX command 0x%x timed out for VF %d\n", (cmd->req.arg[0] & 0xffff), func); rsp = QLCNIC_RCODE_TIMEOUT; /* After adapter reset PF driver may take some time to * respond to VF's request. Retry request till maximum retries. 
*/ if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) && !qlcnic_sriov_retry_bc_cmd(adapter, trans)) goto retry; goto err_out; } rsp_data = cmd->rsp.arg[0]; mbx_err_code = QLCNIC_MBX_STATUS(rsp_data); opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]); if ((mbx_err_code == QLCNIC_MBX_RSP_OK) || (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) { rsp = QLCNIC_RCODE_SUCCESS; } else { if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) { rsp = QLCNIC_RCODE_SUCCESS; } else { rsp = mbx_err_code; if (!rsp) rsp = 1; dev_err(dev, "MBX command 0x%x failed with err:0x%x for VF %d\n", opcode, mbx_err_code, func); } } err_out: if (rsp == QLCNIC_RCODE_TIMEOUT) { ahw->reset_context = 1; adapter->need_fw_reset = 1; clear_bit(QLC_83XX_MBX_READY, &mbx->status); } cleanup_transaction: qlcnic_sriov_cleanup_transaction(trans); free_cmd: if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) { qlcnic_free_mbx_args(cmd); kfree(cmd); } return rsp; } static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) return qlcnic_sriov_async_issue_cmd(adapter, cmd); else return __qlcnic_sriov_issue_cmd(adapter, cmd); } static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op) { struct qlcnic_cmd_args cmd; struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0]; int ret; memset(&cmd, 0, sizeof(cmd)); if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op)) return -ENOMEM; ret = qlcnic_issue_cmd(adapter, &cmd); if (ret) { dev_err(&adapter->pdev->dev, "Failed bc channel %s %d\n", cmd_op ? "term" : "init", ret); goto out; } cmd_op = (cmd.rsp.arg[0] & 0xff); if (cmd.rsp.arg[0] >> 25 == 2) return 2; if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) set_bit(QLC_BC_VF_STATE, &vf->state); else clear_bit(QLC_BC_VF_STATE, &vf->state); out: qlcnic_free_mbx_args(&cmd); return ret; } static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac, enum qlcnic_mac_type mac_type) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_vf_info *vf; u16 vlan_id; int i; vf = &adapter->ahw->sriov->vf_info[0]; if (!qlcnic_sriov_check_any_vlan(vf)) { qlcnic_nic_add_mac(adapter, mac, 0, mac_type); } else { spin_lock(&vf->vlan_list_lock); for (i = 0; i < sriov->num_allowed_vlans; i++) { vlan_id = vf->sriov_vlans[i]; if (vlan_id) qlcnic_nic_add_mac(adapter, mac, vlan_id, mac_type); } spin_unlock(&vf->vlan_list_lock); if (qlcnic_84xx_check(adapter)) qlcnic_nic_add_mac(adapter, mac, 0, mac_type); } } void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc) { struct list_head *head = &bc->async_cmd_list; struct qlcnic_async_cmd *entry; flush_workqueue(bc->bc_async_wq); cancel_work_sync(&bc->vf_async_work); spin_lock(&bc->queue_lock); while (!list_empty(head)) { entry = list_entry(head->next, struct qlcnic_async_cmd, list); list_del(&entry->list); kfree(entry->cmd); kfree(entry); } spin_unlock(&bc->queue_lock); } void qlcnic_sriov_vf_set_multi(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_hardware_context *ahw = adapter->ahw; static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; struct netdev_hw_addr *ha; u32 mode = VPORT_MISS_MODE_DROP; if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) return; if (netdev->flags & IFF_PROMISC) { if (!(adapter->flags & QLCNIC_PROMISC_DISABLED)) mode = VPORT_MISS_MODE_ACCEPT_ALL; } else if ((netdev->flags & IFF_ALLMULTI) || (netdev_mc_count(netdev) > ahw->max_mc_count)) { mode = 
VPORT_MISS_MODE_ACCEPT_MULTI; } else { qlcnic_vf_add_mc_list(netdev, bcast_addr, QLCNIC_BROADCAST_MAC); if (!netdev_mc_empty(netdev)) { qlcnic_flush_mcast_mac(adapter); netdev_for_each_mc_addr(ha, netdev) qlcnic_vf_add_mc_list(netdev, ha->addr, QLCNIC_MULTICAST_MAC); } } /* configure unicast MAC address, if there is not sufficient space * to store all the unicast addresses then enable promiscuous mode */ if (netdev_uc_count(netdev) > ahw->max_uc_count) { mode = VPORT_MISS_MODE_ACCEPT_ALL; } else if (!netdev_uc_empty(netdev)) { netdev_for_each_uc_addr(ha, netdev) qlcnic_vf_add_mc_list(netdev, ha->addr, QLCNIC_UNICAST_MAC); } if (adapter->pdev->is_virtfn) { if (mode == VPORT_MISS_MODE_ACCEPT_ALL && !adapter->fdb_mac_learn) { qlcnic_alloc_lb_filters_mem(adapter); adapter->drv_mac_learn = true; adapter->rx_mac_learn = true; } else { adapter->drv_mac_learn = false; adapter->rx_mac_learn = false; } } qlcnic_nic_set_promisc(adapter, mode); } static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work) { struct qlcnic_async_cmd *entry, *tmp; struct qlcnic_back_channel *bc; struct qlcnic_cmd_args *cmd; struct list_head *head; LIST_HEAD(del_list); bc = container_of(work, struct qlcnic_back_channel, vf_async_work); head = &bc->async_cmd_list; spin_lock(&bc->queue_lock); list_splice_init(head, &del_list); spin_unlock(&bc->queue_lock); list_for_each_entry_safe(entry, tmp, &del_list, list) { list_del(&entry->list); cmd = entry->cmd; __qlcnic_sriov_issue_cmd(bc->adapter, cmd); kfree(entry); } if (!list_empty(head)) queue_work(bc->bc_async_wq, &bc->vf_async_work); return; } static struct qlcnic_async_cmd * qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc, struct qlcnic_cmd_args *cmd) { struct qlcnic_async_cmd *entry = NULL; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) return NULL; entry->cmd = cmd; spin_lock(&bc->queue_lock); list_add_tail(&entry->list, &bc->async_cmd_list); spin_unlock(&bc->queue_lock); return entry; } static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc, struct qlcnic_cmd_args *cmd) { struct qlcnic_async_cmd *entry = NULL; entry = qlcnic_sriov_alloc_async_cmd(bc, cmd); if (!entry) { qlcnic_free_mbx_args(cmd); kfree(cmd); return; } queue_work(bc->bc_async_wq, &bc->vf_async_work); } static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc; if (adapter->need_fw_reset) return -EIO; qlcnic_sriov_schedule_async_cmd(bc, cmd); return 0; } static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter) { int err; adapter->need_fw_reset = 0; qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox); qlcnic_83xx_enable_mbx_interrupt(adapter); err = qlcnic_sriov_cfg_bc_intr(adapter, 1); if (err) return err; err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT); if (err) goto err_out_cleanup_bc_intr; err = qlcnic_sriov_vf_init_driver(adapter); if (err) goto err_out_term_channel; return 0; err_out_term_channel: qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM); err_out_cleanup_bc_intr: qlcnic_sriov_cfg_bc_intr(adapter, 0); return err; } static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter) { struct net_device *netdev = adapter->netdev; if (netif_running(netdev)) { if (!qlcnic_up(adapter, netdev)) qlcnic_restore_indev_addr(netdev, NETDEV_UP); } netif_device_attach(netdev); } static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; 
struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl; struct net_device *netdev = adapter->netdev; u8 i, max_ints = ahw->num_msix - 1; netif_device_detach(netdev); qlcnic_83xx_detach_mailbox_work(adapter); qlcnic_83xx_disable_mbx_intr(adapter); if (netif_running(netdev)) qlcnic_down(adapter, netdev); for (i = 0; i < max_ints; i++) { intr_tbl[i].id = i; intr_tbl[i].enabled = 0; intr_tbl[i].src = 0; } ahw->reset_context = 0; } static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct device *dev = &adapter->pdev->dev; struct qlc_83xx_idc *idc = &ahw->idc; u8 func = ahw->pci_func; u32 state; if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) || (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) { if (!qlcnic_sriov_vf_reinit_driver(adapter)) { qlcnic_sriov_vf_attach(adapter); adapter->fw_fail_cnt = 0; dev_info(dev, "%s: Reinitialization of VF 0x%x done after FW reset\n", __func__, func); } else { dev_err(dev, "%s: Reinitialization of VF 0x%x failed after FW reset\n", __func__, func); state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE); dev_info(dev, "Current state 0x%x after FW reset\n", state); } } return 0; } static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_mailbox *mbx = ahw->mailbox; struct device *dev = &adapter->pdev->dev; struct qlc_83xx_idc *idc = &ahw->idc; u8 func = ahw->pci_func; u32 state; adapter->reset_ctx_cnt++; /* Skip the context reset and check if FW is hung */ if (adapter->reset_ctx_cnt < 3) { adapter->need_fw_reset = 1; clear_bit(QLC_83XX_MBX_READY, &mbx->status); dev_info(dev, "Resetting context, wait here to check if FW is in failed state\n"); return 0; } /* Check if number of resets exceed the threshold. * If it exceeds the threshold just fail the VF. 
*/ if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) { clear_bit(QLC_83XX_MODULE_LOADED, &idc->status); adapter->tx_timeo_cnt = 0; adapter->fw_fail_cnt = 0; adapter->reset_ctx_cnt = 0; qlcnic_sriov_vf_detach(adapter); dev_err(dev, "Device context resets have exceeded the threshold, device interface will be shutdown\n"); return -EIO; } dev_info(dev, "Resetting context of VF 0x%x\n", func); dev_info(dev, "%s: Context reset count %d for VF 0x%x\n", __func__, adapter->reset_ctx_cnt, func); set_bit(__QLCNIC_RESETTING, &adapter->state); adapter->need_fw_reset = 1; clear_bit(QLC_83XX_MBX_READY, &mbx->status); qlcnic_sriov_vf_detach(adapter); adapter->need_fw_reset = 0; if (!qlcnic_sriov_vf_reinit_driver(adapter)) { qlcnic_sriov_vf_attach(adapter); adapter->tx_timeo_cnt = 0; adapter->reset_ctx_cnt = 0; adapter->fw_fail_cnt = 0; dev_info(dev, "Done resetting context for VF 0x%x\n", func); } else { dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n", __func__, func); state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE); dev_info(dev, "%s: Current state 0x%x\n", __func__, state); } return 0; } static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; int ret = 0; if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY) ret = qlcnic_sriov_vf_handle_dev_ready(adapter); else if (ahw->reset_context) ret = qlcnic_sriov_vf_handle_context_reset(adapter); clear_bit(__QLCNIC_RESETTING, &adapter->state); return ret; } static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter) { struct qlc_83xx_idc *idc = &adapter->ahw->idc; dev_err(&adapter->pdev->dev, "Device is in failed state\n"); if (idc->prev_state == QLC_83XX_IDC_DEV_READY) qlcnic_sriov_vf_detach(adapter); clear_bit(QLC_83XX_MODULE_LOADED, &idc->status); clear_bit(__QLCNIC_RESETTING, &adapter->state); return -EIO; } static int qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter) { struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; struct qlc_83xx_idc *idc = &adapter->ahw->idc; dev_info(&adapter->pdev->dev, "Device is in quiescent state\n"); if (idc->prev_state == QLC_83XX_IDC_DEV_READY) { set_bit(__QLCNIC_RESETTING, &adapter->state); adapter->tx_timeo_cnt = 0; adapter->reset_ctx_cnt = 0; clear_bit(QLC_83XX_MBX_READY, &mbx->status); qlcnic_sriov_vf_detach(adapter); } return 0; } static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter) { struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; struct qlc_83xx_idc *idc = &adapter->ahw->idc; u8 func = adapter->ahw->pci_func; if (idc->prev_state == QLC_83XX_IDC_DEV_READY) { dev_err(&adapter->pdev->dev, "Firmware hang detected by VF 0x%x\n", func); set_bit(__QLCNIC_RESETTING, &adapter->state); adapter->tx_timeo_cnt = 0; adapter->reset_ctx_cnt = 0; clear_bit(QLC_83XX_MBX_READY, &mbx->status); qlcnic_sriov_vf_detach(adapter); } return 0; } static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter) { dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__); return 0; } static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter *adapter) { if (adapter->fhash.fnum) qlcnic_prune_lb_filters(adapter); } static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work) { struct qlcnic_adapter *adapter; struct qlc_83xx_idc *idc; int ret = 0; adapter = container_of(work, struct qlcnic_adapter, fw_work.work); idc = &adapter->ahw->idc; idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE); switch (idc->curr_state) { case QLC_83XX_IDC_DEV_READY: ret 
= qlcnic_sriov_vf_idc_ready_state(adapter); break; case QLC_83XX_IDC_DEV_NEED_RESET: case QLC_83XX_IDC_DEV_INIT: ret = qlcnic_sriov_vf_idc_init_reset_state(adapter); break; case QLC_83XX_IDC_DEV_NEED_QUISCENT: ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter); break; case QLC_83XX_IDC_DEV_FAILED: ret = qlcnic_sriov_vf_idc_failed_state(adapter); break; case QLC_83XX_IDC_DEV_QUISCENT: break; default: ret = qlcnic_sriov_vf_idc_unknown_state(adapter); } idc->prev_state = idc->curr_state; qlcnic_sriov_vf_periodic_tasks(adapter); if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status)) qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state, idc->delay); } static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter) { while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) msleep(20); clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); clear_bit(__QLCNIC_RESETTING, &adapter->state); cancel_delayed_work_sync(&adapter->fw_work); } static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov, struct qlcnic_vf_info *vf, u16 vlan_id) { int i, err = -EINVAL; if (!vf->sriov_vlans) return err; spin_lock_bh(&vf->vlan_list_lock); for (i = 0; i < sriov->num_allowed_vlans; i++) { if (vf->sriov_vlans[i] == vlan_id) { err = 0; break; } } spin_unlock_bh(&vf->vlan_list_lock); return err; } static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov, struct qlcnic_vf_info *vf) { int err = 0; spin_lock_bh(&vf->vlan_list_lock); if (vf->num_vlan >= sriov->num_allowed_vlans) err = -EINVAL; spin_unlock_bh(&vf->vlan_list_lock); return err; } static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter, u16 vid, u8 enable) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_vf_info *vf; bool vlan_exist; u8 allowed = 0; int i; vf = &adapter->ahw->sriov->vf_info[0]; vlan_exist = qlcnic_sriov_check_any_vlan(vf); if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE) return -EINVAL; if (enable) { if (qlcnic_83xx_vf_check(adapter) && vlan_exist) return -EINVAL; if (qlcnic_sriov_validate_num_vlans(sriov, vf)) return -EINVAL; if (sriov->any_vlan) { for (i = 0; i < sriov->num_allowed_vlans; i++) { if (sriov->allowed_vlans[i] == vid) allowed = 1; } if (!allowed) return -EINVAL; } } else { if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid)) return -EINVAL; } return 0; } static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id, enum qlcnic_vlan_operations opcode) { struct qlcnic_adapter *adapter = vf->adapter; struct qlcnic_sriov *sriov; sriov = adapter->ahw->sriov; if (!vf->sriov_vlans) return; spin_lock_bh(&vf->vlan_list_lock); switch (opcode) { case QLC_VLAN_ADD: qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id); break; case QLC_VLAN_DELETE: qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id); break; default: netdev_err(adapter->netdev, "Invalid VLAN operation\n"); } spin_unlock_bh(&vf->vlan_list_lock); return; } int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter, u16 vid, u8 enable) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct net_device *netdev = adapter->netdev; struct qlcnic_vf_info *vf; struct qlcnic_cmd_args cmd; int ret; memset(&cmd, 0, sizeof(cmd)); if (vid == 0) return 0; vf = &adapter->ahw->sriov->vf_info[0]; ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable); if (ret) return ret; ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_CFG_GUEST_VLAN); if (ret) return ret; cmd.req.arg[1] = (enable & 1) | vid << 16; qlcnic_sriov_cleanup_async_list(&sriov->bc); ret = 
qlcnic_issue_cmd(adapter, &cmd); if (ret) { dev_err(&adapter->pdev->dev, "Failed to configure guest VLAN, err=%d\n", ret); } else { netif_addr_lock_bh(netdev); qlcnic_free_mac_list(adapter); netif_addr_unlock_bh(netdev); if (enable) qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD); else qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE); netif_addr_lock_bh(netdev); qlcnic_set_multi(netdev); netif_addr_unlock_bh(netdev); } qlcnic_free_mbx_args(&cmd); return ret; } static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter) { struct list_head *head = &adapter->mac_list; struct qlcnic_mac_vlan_list *cur; while (!list_empty(head)) { cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list); qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id, QLCNIC_MAC_DEL); list_del(&cur->list); kfree(cur); } } static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev) { struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; netif_device_detach(netdev); qlcnic_cancel_idc_work(adapter); if (netif_running(netdev)) qlcnic_down(adapter, netdev); qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM); qlcnic_sriov_cfg_bc_intr(adapter, 0); qlcnic_83xx_disable_mbx_intr(adapter); cancel_delayed_work_sync(&adapter->idc_aen_work); return pci_save_state(pdev); } static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter) { struct qlc_83xx_idc *idc = &adapter->ahw->idc; struct net_device *netdev = adapter->netdev; int err; set_bit(QLC_83XX_MODULE_LOADED, &idc->status); qlcnic_83xx_enable_mbx_interrupt(adapter); err = qlcnic_sriov_cfg_bc_intr(adapter, 1); if (err) return err; err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT); if (!err) { if (netif_running(netdev)) { err = qlcnic_up(adapter, netdev); if (!err) qlcnic_restore_indev_addr(netdev, NETDEV_UP); } } netif_device_attach(netdev); qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state, idc->delay); return err; } int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_vf_info *vf; int i; for (i = 0; i < sriov->num_vfs; i++) { vf = &sriov->vf_info[i]; vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans, sizeof(*vf->sriov_vlans), GFP_KERNEL); if (!vf->sriov_vlans) return -ENOMEM; } return 0; } void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_vf_info *vf; int i; for (i = 0; i < sriov->num_vfs; i++) { vf = &sriov->vf_info[i]; kfree(vf->sriov_vlans); vf->sriov_vlans = NULL; } } void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov, struct qlcnic_vf_info *vf, u16 vlan_id) { int i; for (i = 0; i < sriov->num_allowed_vlans; i++) { if (!vf->sriov_vlans[i]) { vf->sriov_vlans[i] = vlan_id; vf->num_vlan++; return; } } } void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov, struct qlcnic_vf_info *vf, u16 vlan_id) { int i; for (i = 0; i < sriov->num_allowed_vlans; i++) { if (vf->sriov_vlans[i] == vlan_id) { vf->sriov_vlans[i] = 0; vf->num_vlan--; return; } } } bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf) { bool err = false; spin_lock_bh(&vf->vlan_list_lock); if (vf->num_vlan) err = true; spin_unlock_bh(&vf->vlan_list_lock); return err; }
linux-master
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
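The guest-VLAN helpers in qlcnic_sriov_common.c above (qlcnic_sriov_add_vlan_id, qlcnic_sriov_del_vlan_id, qlcnic_sriov_check_vlan_id) keep each VF's VLANs in a fixed array of sriov->num_allowed_vlans slots, where a slot value of 0 means free and every access is taken under spin_lock_bh(&vf->vlan_list_lock). The standalone C sketch below models only that slot logic so it can be compiled and run outside the kernel; the struct layout, the NUM_ALLOWED_VLANS value, and the omission of locking are illustrative assumptions, not the driver's definitions.

/* Userspace model of the driver's per-VF VLAN slot bookkeeping.
 * In the kernel these operations run under spin_lock_bh(&vf->vlan_list_lock);
 * locking is omitted here because the sketch is single-threaded.
 */
#include <stdio.h>
#include <string.h>

#define NUM_ALLOWED_VLANS 4	/* illustrative; the driver gets this from sriov */

struct vf_vlans {
	unsigned short slots[NUM_ALLOWED_VLANS];	/* 0 == free slot */
	int num_vlan;
};

static int vlan_add(struct vf_vlans *vf, unsigned short vid)
{
	for (int i = 0; i < NUM_ALLOWED_VLANS; i++) {
		if (!vf->slots[i]) {
			vf->slots[i] = vid;
			vf->num_vlan++;
			return 0;
		}
	}
	return -1;	/* table full: mirrors validate_num_vlans failing */
}

static int vlan_del(struct vf_vlans *vf, unsigned short vid)
{
	for (int i = 0; i < NUM_ALLOWED_VLANS; i++) {
		if (vf->slots[i] == vid) {
			vf->slots[i] = 0;
			vf->num_vlan--;
			return 0;
		}
	}
	return -1;	/* not found: mirrors check_vlan_id returning an error */
}

static int vlan_check(const struct vf_vlans *vf, unsigned short vid)
{
	for (int i = 0; i < NUM_ALLOWED_VLANS; i++)
		if (vf->slots[i] == vid)
			return 0;
	return -1;
}

int main(void)
{
	struct vf_vlans vf;

	memset(&vf, 0, sizeof(vf));
	vlan_add(&vf, 100);
	vlan_add(&vf, 200);
	printf("vlan 100 present: %s\n", vlan_check(&vf, 100) ? "no" : "yes");
	vlan_del(&vf, 100);
	printf("vlan 100 present: %s\n", vlan_check(&vf, 100) ? "no" : "yes");
	printf("active vlans: %d\n", vf.num_vlan);
	return 0;
}

The linear scan is cheap because num_allowed_vlans is small, and the early return for VLAN ID 0 in qlcnic_sriov_cfg_vf_guest_vlan is consistent with 0 marking an empty slot in this table.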
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic qlcnic NIC Driver * Copyright (c) 2009-2013 QLogic Corporation */ #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/swab.h> #include <linux/dma-mapping.h> #include <net/ip.h> #include <linux/ipv6.h> #include <linux/inetdevice.h> #include <linux/sysfs.h> #include <linux/log2.h> #ifdef CONFIG_QLCNIC_HWMON #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #endif #include "qlcnic.h" #include "qlcnic_hw.h" int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) { return -EOPNOTSUPP; } int qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate) { return -EOPNOTSUPP; } static ssize_t qlcnic_store_bridged_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct qlcnic_adapter *adapter = dev_get_drvdata(dev); unsigned long new; int ret = -EINVAL; if (!(adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)) goto err_out; if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) goto err_out; if (kstrtoul(buf, 2, &new)) goto err_out; if (!qlcnic_config_bridged_mode(adapter, !!new)) ret = len; err_out: return ret; } static ssize_t qlcnic_show_bridged_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct qlcnic_adapter *adapter = dev_get_drvdata(dev); int bridged_mode = 0; if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG) bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED); return sprintf(buf, "%d\n", bridged_mode); } static ssize_t qlcnic_store_diag_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct qlcnic_adapter *adapter = dev_get_drvdata(dev); unsigned long new; if (kstrtoul(buf, 2, &new)) return -EINVAL; if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED)) adapter->flags ^= QLCNIC_DIAG_ENABLED; return len; } static ssize_t qlcnic_show_diag_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct qlcnic_adapter *adapter = dev_get_drvdata(dev); return sprintf(buf, "%d\n", !!(adapter->flags & QLCNIC_DIAG_ENABLED)); } static int qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon, u8 *state, u8 *rate) { *rate = LSB(beacon); *state = MSB(beacon); QLCDB(adapter, DRV, "rate %x state %x\n", *rate, *state); if (!*state) { *rate = __QLCNIC_MAX_LED_RATE; return 0; } else if (*state > __QLCNIC_MAX_LED_STATE) { return -EINVAL; } if ((!*rate) || (*rate > __QLCNIC_MAX_LED_RATE)) return -EINVAL; return 0; } static int qlcnic_83xx_store_beacon(struct qlcnic_adapter *adapter, const char *buf, size_t len) { struct qlcnic_hardware_context *ahw = adapter->ahw; unsigned long h_beacon; int err; if (test_bit(__QLCNIC_RESETTING, &adapter->state)) return -EIO; if (kstrtoul(buf, 2, &h_beacon)) return -EINVAL; qlcnic_get_beacon_state(adapter); if (ahw->beacon_state == h_beacon) return len; rtnl_lock(); if (!ahw->beacon_state) { if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) { rtnl_unlock(); return -EBUSY; } } if (h_beacon) err = qlcnic_83xx_config_led(adapter, 1, h_beacon); else err = qlcnic_83xx_config_led(adapter, 0, !h_beacon); if (!err) ahw->beacon_state = h_beacon; if (!ahw->beacon_state) clear_bit(__QLCNIC_LED_ENABLE, &adapter->state); rtnl_unlock(); return len; } static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter, const char *buf, size_t len) { struct qlcnic_hardware_context *ahw = adapter->ahw; int err, drv_sds_rings = adapter->drv_sds_rings; u16 beacon; u8 b_state, b_rate; if (len != sizeof(u16)) return -EINVAL; memcpy(&beacon, buf, 
sizeof(u16)); err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate); if (err) return err; qlcnic_get_beacon_state(adapter); if (ahw->beacon_state == b_state) return len; rtnl_lock(); if (!ahw->beacon_state) { if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) { rtnl_unlock(); return -EBUSY; } } if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { err = -EIO; goto out; } if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { err = qlcnic_diag_alloc_res(adapter->netdev, QLCNIC_LED_TEST); if (err) goto out; set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state); } err = qlcnic_config_led(adapter, b_state, b_rate); if (!err) { err = len; ahw->beacon_state = b_state; } if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) qlcnic_diag_free_res(adapter->netdev, drv_sds_rings); out: if (!ahw->beacon_state) clear_bit(__QLCNIC_LED_ENABLE, &adapter->state); rtnl_unlock(); return err; } static ssize_t qlcnic_store_beacon(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct qlcnic_adapter *adapter = dev_get_drvdata(dev); int err = 0; if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { dev_warn(dev, "LED test not supported in non privileged mode\n"); return -EOPNOTSUPP; } if (qlcnic_82xx_check(adapter)) err = qlcnic_82xx_store_beacon(adapter, buf, len); else if (qlcnic_83xx_check(adapter)) err = qlcnic_83xx_store_beacon(adapter, buf, len); else return -EIO; return err; } static ssize_t qlcnic_show_beacon(struct device *dev, struct device_attribute *attr, char *buf) { struct qlcnic_adapter *adapter = dev_get_drvdata(dev); return sprintf(buf, "%d\n", adapter->ahw->beacon_state); } static int qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter, loff_t offset, size_t size) { size_t crb_size = 4; if (!(adapter->flags & QLCNIC_DIAG_ENABLED)) return -EIO; if (offset < QLCNIC_PCI_CRBSPACE) { if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) crb_size = 8; else return -EINVAL; } if ((size != crb_size) || (offset & (crb_size-1))) return -EINVAL; return 0; } static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); int ret; ret = qlcnic_sysfs_validate_crb(adapter, offset, size); if (ret != 0) return ret; qlcnic_read_crb(adapter, buf, offset, size); qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); return size; } static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); int ret; ret = qlcnic_sysfs_validate_crb(adapter, offset, size); if (ret != 0) return ret; qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); qlcnic_write_crb(adapter, buf, offset, size); return size; } static int qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter, loff_t offset, size_t size) { if (!(adapter->flags & QLCNIC_DIAG_ENABLED)) return -EIO; if ((size != 8) || (offset & 0x7)) return -EIO; return 0; } static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); u64 data; int ret; ret = qlcnic_sysfs_validate_mem(adapter, offset, size); if (ret != 0) return ret; if (qlcnic_pci_mem_read_2M(adapter, offset, 
&data)) return -EIO; memcpy(buf, &data, size); qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); return size; } static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); u64 data; int ret; ret = qlcnic_sysfs_validate_mem(adapter, offset, size); if (ret != 0) return ret; qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); memcpy(&data, buf, size); if (qlcnic_pci_mem_write_2M(adapter, offset, data)) return -EIO; return size; } int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func) { int i; for (i = 0; i < adapter->ahw->total_nic_func; i++) { if (adapter->npars[i].pci_func == pci_func) return i; } dev_err(&adapter->pdev->dev, "%s: Invalid nic function\n", __func__); return -EINVAL; } static int validate_pm_config(struct qlcnic_adapter *adapter, struct qlcnic_pm_func_cfg *pm_cfg, int count) { u8 src_pci_func, s_esw_id, d_esw_id; u8 dest_pci_func; int i, src_index, dest_index; for (i = 0; i < count; i++) { src_pci_func = pm_cfg[i].pci_func; dest_pci_func = pm_cfg[i].dest_npar; src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func); if (src_index < 0) return -EINVAL; dest_index = qlcnic_is_valid_nic_func(adapter, dest_pci_func); if (dest_index < 0) return -EINVAL; s_esw_id = adapter->npars[src_index].phy_port; d_esw_id = adapter->npars[dest_index].phy_port; if (s_esw_id != d_esw_id) return -EINVAL; } return 0; } static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_pm_func_cfg *pm_cfg; u32 id, action, pci_func; int count, rem, i, ret, index; count = size / sizeof(struct qlcnic_pm_func_cfg); rem = size % sizeof(struct qlcnic_pm_func_cfg); if (rem) return -EINVAL; qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); pm_cfg = (struct qlcnic_pm_func_cfg *)buf; ret = validate_pm_config(adapter, pm_cfg, count); if (ret) return ret; for (i = 0; i < count; i++) { pci_func = pm_cfg[i].pci_func; action = !!pm_cfg[i].action; index = qlcnic_is_valid_nic_func(adapter, pci_func); if (index < 0) return -EINVAL; id = adapter->npars[index].phy_port; ret = qlcnic_config_port_mirroring(adapter, id, action, pci_func); if (ret) return ret; } for (i = 0; i < count; i++) { pci_func = pm_cfg[i].pci_func; index = qlcnic_is_valid_nic_func(adapter, pci_func); if (index < 0) return -EINVAL; id = adapter->npars[index].phy_port; adapter->npars[index].enable_pm = !!pm_cfg[i].action; adapter->npars[index].dest_npar = id; } return size; } static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_pm_func_cfg *pm_cfg; u8 pci_func; u32 count; int i; memset(buf, 0, size); pm_cfg = (struct qlcnic_pm_func_cfg *)buf; count = size / sizeof(struct qlcnic_pm_func_cfg); for (i = 0; i < adapter->ahw->total_nic_func; i++) { pci_func = adapter->npars[i].pci_func; if (pci_func >= count) { dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n", __func__, adapter->ahw->total_nic_func, count); continue; } if (!adapter->npars[i].eswitch_status) continue; pm_cfg[pci_func].action = adapter->npars[i].enable_pm; 
pm_cfg[pci_func].dest_npar = 0; pm_cfg[pci_func].pci_func = i; } qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); return size; } static int validate_esw_config(struct qlcnic_adapter *adapter, struct qlcnic_esw_func_cfg *esw_cfg, int count) { struct qlcnic_hardware_context *ahw = adapter->ahw; int i, ret; u32 op_mode; u8 pci_func; if (qlcnic_82xx_check(adapter)) op_mode = readl(ahw->pci_base0 + QLCNIC_DRV_OP_MODE); else op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE); for (i = 0; i < count; i++) { pci_func = esw_cfg[i].pci_func; if (pci_func >= ahw->max_vnic_func) return -EINVAL; if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0) return -EINVAL; switch (esw_cfg[i].op_mode) { case QLCNIC_PORT_DEFAULTS: if (qlcnic_82xx_check(adapter)) { ret = QLC_DEV_GET_DRV(op_mode, pci_func); } else { ret = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode, pci_func); esw_cfg[i].offload_flags = 0; } if (ret != QLCNIC_NON_PRIV_FUNC) { if (esw_cfg[i].mac_anti_spoof != 0) return -EINVAL; if (esw_cfg[i].mac_override != 1) return -EINVAL; if (esw_cfg[i].promisc_mode != 1) return -EINVAL; } break; case QLCNIC_ADD_VLAN: if (!IS_VALID_VLAN(esw_cfg[i].vlan_id)) return -EINVAL; if (!esw_cfg[i].op_type) return -EINVAL; break; case QLCNIC_DEL_VLAN: if (!esw_cfg[i].op_type) return -EINVAL; break; default: return -EINVAL; } } return 0; } static ssize_t qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_esw_func_cfg *esw_cfg; struct qlcnic_npar_info *npar; int count, rem, i, ret; int index; u8 op_mode = 0, pci_func; count = size / sizeof(struct qlcnic_esw_func_cfg); rem = size % sizeof(struct qlcnic_esw_func_cfg); if (rem) return -EINVAL; qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); esw_cfg = (struct qlcnic_esw_func_cfg *)buf; ret = validate_esw_config(adapter, esw_cfg, count); if (ret) return ret; for (i = 0; i < count; i++) { if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) if (qlcnic_config_switch_port(adapter, &esw_cfg[i])) return -EINVAL; if (adapter->ahw->pci_func != esw_cfg[i].pci_func) continue; op_mode = esw_cfg[i].op_mode; qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]); esw_cfg[i].op_mode = op_mode; esw_cfg[i].pci_func = adapter->ahw->pci_func; switch (esw_cfg[i].op_mode) { case QLCNIC_PORT_DEFAULTS: qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]); rtnl_lock(); qlcnic_set_netdev_features(adapter, &esw_cfg[i]); rtnl_unlock(); break; case QLCNIC_ADD_VLAN: qlcnic_set_vlan_config(adapter, &esw_cfg[i]); break; case QLCNIC_DEL_VLAN: esw_cfg[i].vlan_id = 0; qlcnic_set_vlan_config(adapter, &esw_cfg[i]); break; } } if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) goto out; for (i = 0; i < count; i++) { pci_func = esw_cfg[i].pci_func; index = qlcnic_is_valid_nic_func(adapter, pci_func); if (index < 0) return -EINVAL; npar = &adapter->npars[index]; switch (esw_cfg[i].op_mode) { case QLCNIC_PORT_DEFAULTS: npar->promisc_mode = esw_cfg[i].promisc_mode; npar->mac_override = esw_cfg[i].mac_override; npar->offload_flags = esw_cfg[i].offload_flags; npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof; npar->discard_tagged = esw_cfg[i].discard_tagged; break; case QLCNIC_ADD_VLAN: npar->pvid = esw_cfg[i].vlan_id; break; case QLCNIC_DEL_VLAN: npar->pvid = 0; break; } } out: return size; } static ssize_t qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj, struct 
bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_esw_func_cfg *esw_cfg; u8 pci_func; u32 count; int i; memset(buf, 0, size); esw_cfg = (struct qlcnic_esw_func_cfg *)buf; count = size / sizeof(struct qlcnic_esw_func_cfg); for (i = 0; i < adapter->ahw->total_nic_func; i++) { pci_func = adapter->npars[i].pci_func; if (pci_func >= count) { dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n", __func__, adapter->ahw->total_nic_func, count); continue; } if (!adapter->npars[i].eswitch_status) continue; esw_cfg[pci_func].pci_func = pci_func; if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func])) return -EINVAL; } qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); return size; } static int validate_npar_config(struct qlcnic_adapter *adapter, struct qlcnic_npar_func_cfg *np_cfg, int count) { u8 pci_func, i; for (i = 0; i < count; i++) { pci_func = np_cfg[i].pci_func; if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0) return -EINVAL; if (!IS_VALID_BW(np_cfg[i].min_bw) || !IS_VALID_BW(np_cfg[i].max_bw)) return -EINVAL; } return 0; } static ssize_t qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_info nic_info; struct qlcnic_npar_func_cfg *np_cfg; int i, count, rem, ret, index; u8 pci_func; count = size / sizeof(struct qlcnic_npar_func_cfg); rem = size % sizeof(struct qlcnic_npar_func_cfg); if (rem) return -EINVAL; qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); np_cfg = (struct qlcnic_npar_func_cfg *)buf; ret = validate_npar_config(adapter, np_cfg, count); if (ret) return ret; for (i = 0; i < count; i++) { pci_func = np_cfg[i].pci_func; memset(&nic_info, 0, sizeof(struct qlcnic_info)); ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func); if (ret) return ret; nic_info.pci_func = pci_func; nic_info.min_tx_bw = np_cfg[i].min_bw; nic_info.max_tx_bw = np_cfg[i].max_bw; ret = qlcnic_set_nic_info(adapter, &nic_info); if (ret) return ret; index = qlcnic_is_valid_nic_func(adapter, pci_func); if (index < 0) return -EINVAL; adapter->npars[index].min_bw = nic_info.min_tx_bw; adapter->npars[index].max_bw = nic_info.max_tx_bw; } return size; } static ssize_t qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_npar_func_cfg *np_cfg; struct qlcnic_info nic_info; u8 pci_func; int i, ret; u32 count; memset(&nic_info, 0, sizeof(struct qlcnic_info)); memset(buf, 0, size); np_cfg = (struct qlcnic_npar_func_cfg *)buf; count = size / sizeof(struct qlcnic_npar_func_cfg); for (i = 0; i < adapter->ahw->total_nic_func; i++) { if (adapter->npars[i].pci_func >= count) { dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n", __func__, adapter->ahw->total_nic_func, count); continue; } if (!adapter->npars[i].eswitch_status) continue; pci_func = adapter->npars[i].pci_func; if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0) continue; ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func); if (ret) return ret; np_cfg[pci_func].pci_func = pci_func; np_cfg[pci_func].op_mode = (u8)nic_info.op_mode; np_cfg[pci_func].port_num = nic_info.phys_port; 
np_cfg[pci_func].fw_capab = nic_info.capabilities; np_cfg[pci_func].min_bw = nic_info.min_tx_bw; np_cfg[pci_func].max_bw = nic_info.max_tx_bw; np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques; np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques; } qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); return size; } static ssize_t qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_esw_statistics port_stats; int ret; if (qlcnic_83xx_check(adapter)) return -EOPNOTSUPP; if (size != sizeof(struct qlcnic_esw_statistics)) return -EINVAL; if (offset >= adapter->ahw->max_vnic_func) return -EINVAL; memset(&port_stats, 0, size); ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER, &port_stats.rx); if (ret) return ret; ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER, &port_stats.tx); if (ret) return ret; memcpy(buf, &port_stats, size); return size; } static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_esw_statistics esw_stats; int ret; if (qlcnic_83xx_check(adapter)) return -EOPNOTSUPP; if (size != sizeof(struct qlcnic_esw_statistics)) return -EINVAL; if (offset >= QLCNIC_NIU_MAX_XG_PORTS) return -EINVAL; memset(&esw_stats, 0, size); ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER, &esw_stats.rx); if (ret) return ret; ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER, &esw_stats.tx); if (ret) return ret; memcpy(buf, &esw_stats, size); return size; } static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); int ret; if (qlcnic_83xx_check(adapter)) return -EOPNOTSUPP; if (offset >= QLCNIC_NIU_MAX_XG_PORTS) return -EINVAL; ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset, QLCNIC_QUERY_RX_COUNTER); if (ret) return ret; ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset, QLCNIC_QUERY_TX_COUNTER); if (ret) return ret; return size; } static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); int ret; if (qlcnic_83xx_check(adapter)) return -EOPNOTSUPP; if (offset >= adapter->ahw->max_vnic_func) return -EINVAL; ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset, QLCNIC_QUERY_RX_COUNTER); if (ret) return ret; ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset, QLCNIC_QUERY_TX_COUNTER); if (ret) return ret; return size; } static ssize_t qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_pci_func_cfg *pci_cfg; struct qlcnic_pci_info *pci_info; int i, ret; u32 count; pci_info = kcalloc(size, sizeof(*pci_info), GFP_KERNEL); if (!pci_info) return -ENOMEM; ret = qlcnic_get_pci_info(adapter, pci_info); if 
(ret) { kfree(pci_info); return ret; } pci_cfg = (struct qlcnic_pci_func_cfg *)buf; count = size / sizeof(struct qlcnic_pci_func_cfg); qlcnic_swap32_buffer((u32 *)pci_info, size / sizeof(u32)); for (i = 0; i < count; i++) { pci_cfg[i].pci_func = pci_info[i].id; pci_cfg[i].func_type = pci_info[i].type; pci_cfg[i].func_state = 0; pci_cfg[i].port_num = pci_info[i].default_port; pci_cfg[i].min_bw = pci_info[i].tx_min_bw; pci_cfg[i].max_bw = pci_info[i].tx_max_bw; memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN); } kfree(pci_info); return size; } static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { unsigned char *p_read_buf; int ret, count; struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); if (!size) return -EINVAL; count = size / sizeof(u32); if (size % sizeof(u32)) count++; p_read_buf = kcalloc(size, sizeof(unsigned char), GFP_KERNEL); if (!p_read_buf) return -ENOMEM; if (qlcnic_83xx_lock_flash(adapter) != 0) { kfree(p_read_buf); return -EIO; } ret = qlcnic_83xx_lockless_flash_read32(adapter, offset, p_read_buf, count); if (ret) { qlcnic_83xx_unlock_flash(adapter); kfree(p_read_buf); return ret; } qlcnic_83xx_unlock_flash(adapter); qlcnic_swap32_buffer((u32 *)p_read_buf, count); memcpy(buf, p_read_buf, size); kfree(p_read_buf); return size; } static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter, char *buf, loff_t offset, size_t size) { int i, ret, count; unsigned char *p_cache, *p_src; p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL); if (!p_cache) return -ENOMEM; count = size / sizeof(u32); qlcnic_swap32_buffer((u32 *)buf, count); memcpy(p_cache, buf, size); p_src = p_cache; if (qlcnic_83xx_lock_flash(adapter) != 0) { kfree(p_cache); return -EIO; } if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { ret = qlcnic_83xx_enable_flash_write(adapter); if (ret) { kfree(p_cache); qlcnic_83xx_unlock_flash(adapter); return -EIO; } } for (i = 0; i < count / QLC_83XX_FLASH_WRITE_MAX; i++) { ret = qlcnic_83xx_flash_bulk_write(adapter, offset, (u32 *)p_src, QLC_83XX_FLASH_WRITE_MAX); if (ret) { if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { ret = qlcnic_83xx_disable_flash_write(adapter); if (ret) { kfree(p_cache); qlcnic_83xx_unlock_flash(adapter); return -EIO; } } kfree(p_cache); qlcnic_83xx_unlock_flash(adapter); return -EIO; } p_src = p_src + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX; offset = offset + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX; } if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { ret = qlcnic_83xx_disable_flash_write(adapter); if (ret) { kfree(p_cache); qlcnic_83xx_unlock_flash(adapter); return -EIO; } } kfree(p_cache); qlcnic_83xx_unlock_flash(adapter); return 0; } static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter, char *buf, loff_t offset, size_t size) { int i, ret, count; unsigned char *p_cache, *p_src; p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL); if (!p_cache) return -ENOMEM; qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); memcpy(p_cache, buf, size); p_src = p_cache; count = size / sizeof(u32); if (qlcnic_83xx_lock_flash(adapter) != 0) { kfree(p_cache); return -EIO; } if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { ret = qlcnic_83xx_enable_flash_write(adapter); if (ret) { kfree(p_cache); qlcnic_83xx_unlock_flash(adapter); return -EIO; } } for (i = 0; i < count; i++) { ret = qlcnic_83xx_flash_write32(adapter, offset, (u32 
*)p_src); if (ret) { if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { ret = qlcnic_83xx_disable_flash_write(adapter); if (ret) { kfree(p_cache); qlcnic_83xx_unlock_flash(adapter); return -EIO; } } kfree(p_cache); qlcnic_83xx_unlock_flash(adapter); return -EIO; } p_src = p_src + sizeof(u32); offset = offset + sizeof(u32); } if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { ret = qlcnic_83xx_disable_flash_write(adapter); if (ret) { kfree(p_cache); qlcnic_83xx_unlock_flash(adapter); return -EIO; } } kfree(p_cache); qlcnic_83xx_unlock_flash(adapter); return 0; } static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { int ret; static int flash_mode; unsigned long data; struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); ret = kstrtoul(buf, 16, &data); if (ret) return ret; switch (data) { case QLC_83XX_FLASH_SECTOR_ERASE_CMD: flash_mode = QLC_83XX_ERASE_MODE; ret = qlcnic_83xx_erase_flash_sector(adapter, offset); if (ret) { dev_err(&adapter->pdev->dev, "%s failed at %d\n", __func__, __LINE__); return -EIO; } break; case QLC_83XX_FLASH_BULK_WRITE_CMD: flash_mode = QLC_83XX_BULK_WRITE_MODE; break; case QLC_83XX_FLASH_WRITE_CMD: flash_mode = QLC_83XX_WRITE_MODE; break; default: if (flash_mode == QLC_83XX_BULK_WRITE_MODE) { ret = qlcnic_83xx_sysfs_flash_bulk_write(adapter, buf, offset, size); if (ret) { dev_err(&adapter->pdev->dev, "%s failed at %d\n", __func__, __LINE__); return -EIO; } } if (flash_mode == QLC_83XX_WRITE_MODE) { ret = qlcnic_83xx_sysfs_flash_write(adapter, buf, offset, size); if (ret) { dev_err(&adapter->pdev->dev, "%s failed at %d\n", __func__, __LINE__); return -EIO; } } } return size; } static const struct device_attribute dev_attr_bridged_mode = { .attr = { .name = "bridged_mode", .mode = 0644 }, .show = qlcnic_show_bridged_mode, .store = qlcnic_store_bridged_mode, }; static const struct device_attribute dev_attr_diag_mode = { .attr = { .name = "diag_mode", .mode = 0644 }, .show = qlcnic_show_diag_mode, .store = qlcnic_store_diag_mode, }; static const struct device_attribute dev_attr_beacon = { .attr = { .name = "beacon", .mode = 0644 }, .show = qlcnic_show_beacon, .store = qlcnic_store_beacon, }; static const struct bin_attribute bin_attr_crb = { .attr = { .name = "crb", .mode = 0644 }, .size = 0, .read = qlcnic_sysfs_read_crb, .write = qlcnic_sysfs_write_crb, }; static const struct bin_attribute bin_attr_mem = { .attr = { .name = "mem", .mode = 0644 }, .size = 0, .read = qlcnic_sysfs_read_mem, .write = qlcnic_sysfs_write_mem, }; static const struct bin_attribute bin_attr_npar_config = { .attr = { .name = "npar_config", .mode = 0644 }, .size = 0, .read = qlcnic_sysfs_read_npar_config, .write = qlcnic_sysfs_write_npar_config, }; static const struct bin_attribute bin_attr_pci_config = { .attr = { .name = "pci_config", .mode = 0644 }, .size = 0, .read = qlcnic_sysfs_read_pci_config, .write = NULL, }; static const struct bin_attribute bin_attr_port_stats = { .attr = { .name = "port_stats", .mode = 0644 }, .size = 0, .read = qlcnic_sysfs_get_port_stats, .write = qlcnic_sysfs_clear_port_stats, }; static const struct bin_attribute bin_attr_esw_stats = { .attr = { .name = "esw_stats", .mode = 0644 }, .size = 0, .read = qlcnic_sysfs_get_esw_stats, .write = qlcnic_sysfs_clear_esw_stats, }; static const struct bin_attribute bin_attr_esw_config = { .attr = { .name = "esw_config", .mode = 0644 }, .size = 0, .read = 
qlcnic_sysfs_read_esw_config, .write = qlcnic_sysfs_write_esw_config, }; static const struct bin_attribute bin_attr_pm_config = { .attr = { .name = "pm_config", .mode = 0644 }, .size = 0, .read = qlcnic_sysfs_read_pm_config, .write = qlcnic_sysfs_write_pm_config, }; static const struct bin_attribute bin_attr_flash = { .attr = { .name = "flash", .mode = 0644 }, .size = 0, .read = qlcnic_83xx_sysfs_flash_read_handler, .write = qlcnic_83xx_sysfs_flash_write_handler, }; #ifdef CONFIG_QLCNIC_HWMON static ssize_t qlcnic_hwmon_show_temp(struct device *dev, struct device_attribute *dev_attr, char *buf) { struct qlcnic_adapter *adapter = dev_get_drvdata(dev); unsigned int temperature = 0, value = 0; if (qlcnic_83xx_check(adapter)) value = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP); else if (qlcnic_82xx_check(adapter)) value = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP); temperature = qlcnic_get_temp_val(value); /* display millidegree celcius */ temperature *= 1000; return sprintf(buf, "%u\n", temperature); } /* hwmon-sysfs attributes */ static SENSOR_DEVICE_ATTR(temp1_input, 0444, qlcnic_hwmon_show_temp, NULL, 1); static struct attribute *qlcnic_hwmon_attrs[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, NULL }; ATTRIBUTE_GROUPS(qlcnic_hwmon); void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter) { struct device *dev = &adapter->pdev->dev; struct device *hwmon_dev; /* Skip hwmon registration for a VF device */ if (qlcnic_sriov_vf_check(adapter)) { adapter->ahw->hwmon_dev = NULL; return; } hwmon_dev = hwmon_device_register_with_groups(dev, qlcnic_driver_name, adapter, qlcnic_hwmon_groups); if (IS_ERR(hwmon_dev)) { dev_err(dev, "Cannot register with hwmon, err=%ld\n", PTR_ERR(hwmon_dev)); hwmon_dev = NULL; } adapter->ahw->hwmon_dev = hwmon_dev; } void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter) { struct device *hwmon_dev = adapter->ahw->hwmon_dev; if (hwmon_dev) { hwmon_device_unregister(hwmon_dev); adapter->ahw->hwmon_dev = NULL; } } #endif void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter) { struct device *dev = &adapter->pdev->dev; if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG) if (device_create_file(dev, &dev_attr_bridged_mode)) dev_warn(dev, "failed to create bridged_mode sysfs entry\n"); } void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter) { struct device *dev = &adapter->pdev->dev; if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG) device_remove_file(dev, &dev_attr_bridged_mode); } static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) { struct device *dev = &adapter->pdev->dev; if (device_create_bin_file(dev, &bin_attr_port_stats)) dev_info(dev, "failed to create port stats sysfs entry"); if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) return; if (device_create_file(dev, &dev_attr_diag_mode)) dev_info(dev, "failed to create diag_mode sysfs entry\n"); if (device_create_bin_file(dev, &bin_attr_crb)) dev_info(dev, "failed to create crb sysfs entry\n"); if (device_create_bin_file(dev, &bin_attr_mem)) dev_info(dev, "failed to create mem sysfs entry\n"); if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) return; if (device_create_bin_file(dev, &bin_attr_pci_config)) dev_info(dev, "failed to create pci config sysfs entry"); if (device_create_file(dev, &dev_attr_beacon)) dev_info(dev, "failed to create beacon sysfs entry"); if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) return; if (device_create_bin_file(dev, &bin_attr_esw_config)) dev_info(dev, "failed to create esw config sysfs entry"); if 
(adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) return; if (device_create_bin_file(dev, &bin_attr_npar_config)) dev_info(dev, "failed to create npar config sysfs entry"); if (device_create_bin_file(dev, &bin_attr_pm_config)) dev_info(dev, "failed to create pm config sysfs entry"); if (device_create_bin_file(dev, &bin_attr_esw_stats)) dev_info(dev, "failed to create eswitch stats sysfs entry"); } static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) { struct device *dev = &adapter->pdev->dev; device_remove_bin_file(dev, &bin_attr_port_stats); if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) return; device_remove_file(dev, &dev_attr_diag_mode); device_remove_bin_file(dev, &bin_attr_crb); device_remove_bin_file(dev, &bin_attr_mem); if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) return; device_remove_bin_file(dev, &bin_attr_pci_config); device_remove_file(dev, &dev_attr_beacon); if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) return; device_remove_bin_file(dev, &bin_attr_esw_config); if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) return; device_remove_bin_file(dev, &bin_attr_npar_config); device_remove_bin_file(dev, &bin_attr_pm_config); device_remove_bin_file(dev, &bin_attr_esw_stats); } void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter) { qlcnic_create_diag_entries(adapter); } void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter) { qlcnic_remove_diag_entries(adapter); } void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *adapter) { struct device *dev = &adapter->pdev->dev; qlcnic_create_diag_entries(adapter); if (sysfs_create_bin_file(&dev->kobj, &bin_attr_flash)) dev_info(dev, "failed to create flash sysfs entry\n"); } void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *adapter) { struct device *dev = &adapter->pdev->dev; qlcnic_remove_diag_entries(adapter); sysfs_remove_bin_file(&dev->kobj, &bin_attr_flash); }
linux-master
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
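qlcnic_hwmon_show_temp in the sysfs file above exposes the ASIC temperature as a standard hwmon temp1_input attribute, scaling the raw value by 1000 so it reads in millidegrees Celsius. A minimal userspace reader is sketched below; the sysfs path is a placeholder assumption, since the hwmonN index is only known after hwmon_device_register_with_groups() runs on a given system.

/* Read a hwmon temp1_input attribute and convert millidegrees C to degrees C.
 * The path below is a placeholder; the actual hwmonN index is assigned at
 * registration time and must be discovered on the running system.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/class/hwmon/hwmon0/temp1_input"; /* placeholder */
	FILE *f = fopen(path, "r");
	long millideg;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%ld", &millideg) != 1) {
		fclose(f);
		fprintf(stderr, "unexpected temp1_input format\n");
		return 1;
	}
	fclose(f);

	/* The driver writes value * 1000, i.e. millidegrees Celsius. */
	printf("ASIC temperature: %ld.%03ld C\n",
	       millideg / 1000, labs(millideg % 1000));
	return 0;
}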
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic qlcnic NIC Driver * Copyright (c) 2009-2013 QLogic Corporation */ #include <linux/types.h> #include "qlcnic.h" #define QLC_DCB_NUM_PARAM 3 #define QLC_DCB_LOCAL_IDX 0 #define QLC_DCB_OPER_IDX 1 #define QLC_DCB_PEER_IDX 2 #define QLC_DCB_GET_MAP(V) (1 << V) #define QLC_DCB_FW_VER 0x2 #define QLC_DCB_MAX_TC 0x8 #define QLC_DCB_MAX_APP 0x8 #define QLC_DCB_MAX_PRIO QLC_DCB_MAX_TC #define QLC_DCB_MAX_PG QLC_DCB_MAX_TC #define QLC_DCB_TSA_SUPPORT(V) (V & 0x1) #define QLC_DCB_ETS_SUPPORT(V) ((V >> 1) & 0x1) #define QLC_DCB_VERSION_SUPPORT(V) ((V >> 2) & 0xf) #define QLC_DCB_MAX_NUM_TC(V) ((V >> 20) & 0xf) #define QLC_DCB_MAX_NUM_ETS_TC(V) ((V >> 24) & 0xf) #define QLC_DCB_MAX_NUM_PFC_TC(V) ((V >> 28) & 0xf) #define QLC_DCB_GET_TC_PRIO(X, P) ((X >> (P * 3)) & 0x7) #define QLC_DCB_GET_PGID_PRIO(X, P) ((X >> (P * 8)) & 0xff) #define QLC_DCB_GET_BWPER_PG(X, P) ((X >> (P * 8)) & 0xff) #define QLC_DCB_GET_TSA_PG(X, P) ((X >> (P * 8)) & 0xff) #define QLC_DCB_GET_PFC_PRIO(X, P) (((X >> 24) >> P) & 0x1) #define QLC_DCB_GET_PROTO_ID_APP(X) ((X >> 8) & 0xffff) #define QLC_DCB_GET_SELECTOR_APP(X) (X & 0xff) #define QLC_DCB_LOCAL_PARAM_FWID 0x3 #define QLC_DCB_OPER_PARAM_FWID 0x1 #define QLC_DCB_PEER_PARAM_FWID 0x2 #define QLC_83XX_DCB_GET_NUMAPP(X) ((X >> 2) & 0xf) #define QLC_83XX_DCB_TSA_VALID(X) (X & 0x1) #define QLC_83XX_DCB_PFC_VALID(X) ((X >> 1) & 0x1) #define QLC_83XX_DCB_GET_PRIOMAP_APP(X) (X >> 24) #define QLC_82XX_DCB_GET_NUMAPP(X) ((X >> 12) & 0xf) #define QLC_82XX_DCB_TSA_VALID(X) ((X >> 4) & 0x1) #define QLC_82XX_DCB_PFC_VALID(X) ((X >> 5) & 0x1) #define QLC_82XX_DCB_GET_PRIOVAL_APP(X) ((X >> 24) & 0x7) #define QLC_82XX_DCB_GET_PRIOMAP_APP(X) (1 << X) #define QLC_82XX_DCB_PRIO_TC_MAP (0x76543210) static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops; static void qlcnic_dcb_aen_work(struct work_struct *); static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *); static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *); static void __qlcnic_dcb_free(struct qlcnic_dcb *); static int __qlcnic_dcb_attach(struct qlcnic_dcb *); static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *, char *); static void __qlcnic_dcb_get_info(struct qlcnic_dcb *); static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *); static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8); static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *); static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *, void *); static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *); static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8); static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *); static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *, void *); struct qlcnic_dcb_capability { bool tsa_capability; bool ets_capability; u8 max_num_tc; u8 max_ets_tc; u8 max_pfc_tc; u8 dcb_capability; }; struct qlcnic_dcb_param { u32 hdr_prio_pfc_map[2]; u32 prio_pg_map[2]; u32 pg_bw_map[2]; u32 pg_tsa_map[2]; u32 app[QLC_DCB_MAX_APP]; }; struct qlcnic_dcb_mbx_params { /* 1st local, 2nd operational 3rd remote */ struct qlcnic_dcb_param type[3]; u32 prio_tc_map; }; struct qlcnic_82xx_dcb_param_mbx_le { __le32 hdr_prio_pfc_map[2]; __le32 prio_pg_map[2]; __le32 pg_bw_map[2]; __le32 pg_tsa_map[2]; __le32 app[QLC_DCB_MAX_APP]; }; enum qlcnic_dcb_selector { QLC_SELECTOR_DEF = 0x0, QLC_SELECTOR_ETHER, QLC_SELECTOR_TCP, QLC_SELECTOR_UDP, }; enum qlcnic_dcb_prio_type { QLC_PRIO_NONE = 0, QLC_PRIO_GROUP, 
QLC_PRIO_LINK, }; enum qlcnic_dcb_pfc_type { QLC_PFC_DISABLED = 0, QLC_PFC_FULL, QLC_PFC_TX, QLC_PFC_RX }; struct qlcnic_dcb_prio_cfg { bool valid; enum qlcnic_dcb_pfc_type pfc_type; }; struct qlcnic_dcb_pg_cfg { bool valid; u8 total_bw_percent; /* of Link/ port BW */ u8 prio_count; u8 tsa_type; }; struct qlcnic_dcb_tc_cfg { bool valid; struct qlcnic_dcb_prio_cfg prio_cfg[QLC_DCB_MAX_PRIO]; enum qlcnic_dcb_prio_type prio_type; /* always prio_link */ u8 link_percent; /* % of link bandwidth */ u8 bwg_percent; /* % of BWG's bandwidth */ u8 up_tc_map; u8 pgid; }; struct qlcnic_dcb_app { bool valid; enum qlcnic_dcb_selector selector; u16 protocol; u8 priority; }; struct qlcnic_dcb_cee { struct qlcnic_dcb_tc_cfg tc_cfg[QLC_DCB_MAX_TC]; struct qlcnic_dcb_pg_cfg pg_cfg[QLC_DCB_MAX_PG]; struct qlcnic_dcb_app app[QLC_DCB_MAX_APP]; bool tc_param_valid; bool pfc_mode_enable; }; struct qlcnic_dcb_cfg { /* 0 - local, 1 - operational, 2 - remote */ struct qlcnic_dcb_cee type[QLC_DCB_NUM_PARAM]; struct qlcnic_dcb_capability capability; u32 version; }; static const struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = { .init_dcbnl_ops = __qlcnic_init_dcbnl_ops, .free = __qlcnic_dcb_free, .attach = __qlcnic_dcb_attach, .query_hw_capability = __qlcnic_dcb_query_hw_capability, .get_info = __qlcnic_dcb_get_info, .get_hw_capability = qlcnic_83xx_dcb_get_hw_capability, .query_cee_param = qlcnic_83xx_dcb_query_cee_param, .get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg, .aen_handler = qlcnic_83xx_dcb_aen_handler, }; static const struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = { .init_dcbnl_ops = __qlcnic_init_dcbnl_ops, .free = __qlcnic_dcb_free, .attach = __qlcnic_dcb_attach, .query_hw_capability = __qlcnic_dcb_query_hw_capability, .get_info = __qlcnic_dcb_get_info, .get_hw_capability = qlcnic_82xx_dcb_get_hw_capability, .query_cee_param = qlcnic_82xx_dcb_query_cee_param, .get_cee_cfg = qlcnic_82xx_dcb_get_cee_cfg, .aen_handler = qlcnic_82xx_dcb_aen_handler, }; static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val) { if (qlcnic_82xx_check(adapter)) return QLC_82XX_DCB_GET_NUMAPP(val); else return QLC_83XX_DCB_GET_NUMAPP(val); } static inline u8 qlcnic_dcb_pfc_hdr_valid(struct qlcnic_adapter *adapter, u32 val) { if (qlcnic_82xx_check(adapter)) return QLC_82XX_DCB_PFC_VALID(val); else return QLC_83XX_DCB_PFC_VALID(val); } static inline u8 qlcnic_dcb_tsa_hdr_valid(struct qlcnic_adapter *adapter, u32 val) { if (qlcnic_82xx_check(adapter)) return QLC_82XX_DCB_TSA_VALID(val); else return QLC_83XX_DCB_TSA_VALID(val); } static inline u8 qlcnic_dcb_get_prio_map_app(struct qlcnic_adapter *adapter, u32 val) { if (qlcnic_82xx_check(adapter)) return QLC_82XX_DCB_GET_PRIOMAP_APP(val); else return QLC_83XX_DCB_GET_PRIOMAP_APP(val); } static int qlcnic_dcb_prio_count(u8 up_tc_map) { int j; for (j = 0; j < QLC_DCB_MAX_TC; j++) if (up_tc_map & QLC_DCB_GET_MAP(j)) break; return j; } static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *dcb) { if (test_bit(QLCNIC_DCB_STATE, &dcb->state)) dcb->adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops; } static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter) { if (qlcnic_82xx_check(adapter)) adapter->dcb->ops = &qlcnic_82xx_dcb_ops; else if (qlcnic_83xx_check(adapter)) adapter->dcb->ops = &qlcnic_83xx_dcb_ops; } int qlcnic_register_dcb(struct qlcnic_adapter *adapter) { struct qlcnic_dcb *dcb; if (qlcnic_sriov_vf_check(adapter)) return 0; dcb = kzalloc(sizeof(struct qlcnic_dcb), GFP_ATOMIC); if (!dcb) return -ENOMEM; adapter->dcb = dcb; dcb->adapter = adapter; 
qlcnic_set_dcb_ops(adapter); dcb->state = 0; return 0; } static void __qlcnic_dcb_free(struct qlcnic_dcb *dcb) { struct qlcnic_adapter *adapter; if (!dcb) return; adapter = dcb->adapter; while (test_bit(QLCNIC_DCB_AEN_MODE, &dcb->state)) usleep_range(10000, 11000); cancel_delayed_work_sync(&dcb->aen_work); if (dcb->wq) { destroy_workqueue(dcb->wq); dcb->wq = NULL; } kfree(dcb->cfg); dcb->cfg = NULL; kfree(dcb->param); dcb->param = NULL; kfree(dcb); adapter->dcb = NULL; } static void __qlcnic_dcb_get_info(struct qlcnic_dcb *dcb) { qlcnic_dcb_get_hw_capability(dcb); qlcnic_dcb_get_cee_cfg(dcb); } static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb) { int err = 0; INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work); dcb->wq = create_singlethread_workqueue("qlcnic-dcb"); if (!dcb->wq) { dev_err(&dcb->adapter->pdev->dev, "DCB workqueue allocation failed. DCB will be disabled\n"); return -1; } dcb->cfg = kzalloc(sizeof(struct qlcnic_dcb_cfg), GFP_ATOMIC); if (!dcb->cfg) { err = -ENOMEM; goto out_free_wq; } dcb->param = kzalloc(sizeof(struct qlcnic_dcb_mbx_params), GFP_ATOMIC); if (!dcb->param) { err = -ENOMEM; goto out_free_cfg; } return 0; out_free_cfg: kfree(dcb->cfg); dcb->cfg = NULL; out_free_wq: destroy_workqueue(dcb->wq); dcb->wq = NULL; return err; } static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf) { struct qlcnic_adapter *adapter = dcb->adapter; struct qlcnic_cmd_args cmd; u32 mbx_out; int err; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_CAP); if (err) return err; err = qlcnic_issue_cmd(adapter, &cmd); if (err) { dev_err(&adapter->pdev->dev, "Failed to query DCBX capability, err %d\n", err); } else { mbx_out = cmd.rsp.arg[1]; if (buf) memcpy(buf, &mbx_out, sizeof(u32)); } qlcnic_free_mbx_args(&cmd); return err; } static int __qlcnic_dcb_get_capability(struct qlcnic_dcb *dcb, u32 *val) { struct qlcnic_dcb_capability *cap = &dcb->cfg->capability; u32 mbx_out; int err; memset(cap, 0, sizeof(struct qlcnic_dcb_capability)); err = qlcnic_dcb_query_hw_capability(dcb, (char *)val); if (err) return err; mbx_out = *val; if (QLC_DCB_TSA_SUPPORT(mbx_out)) cap->tsa_capability = true; if (QLC_DCB_ETS_SUPPORT(mbx_out)) cap->ets_capability = true; cap->max_num_tc = QLC_DCB_MAX_NUM_TC(mbx_out); cap->max_ets_tc = QLC_DCB_MAX_NUM_ETS_TC(mbx_out); cap->max_pfc_tc = QLC_DCB_MAX_NUM_PFC_TC(mbx_out); if (cap->max_num_tc > QLC_DCB_MAX_TC || cap->max_ets_tc > cap->max_num_tc || cap->max_pfc_tc > cap->max_num_tc) { dev_err(&dcb->adapter->pdev->dev, "Invalid DCB configuration\n"); return -EINVAL; } return err; } static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb) { struct qlcnic_dcb_cfg *cfg = dcb->cfg; struct qlcnic_dcb_capability *cap; u32 mbx_out; int err; err = __qlcnic_dcb_get_capability(dcb, &mbx_out); if (err) return err; cap = &cfg->capability; cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED; if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability) set_bit(QLCNIC_DCB_STATE, &dcb->state); return err; } static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type) { u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le); struct qlcnic_adapter *adapter = dcb->adapter; struct qlcnic_82xx_dcb_param_mbx_le *prsp_le; struct device *dev = &adapter->pdev->dev; dma_addr_t cardrsp_phys_addr; struct qlcnic_dcb_param rsp; struct qlcnic_cmd_args cmd; u64 phys_addr; void *addr; int err, i; switch (type) { case QLC_DCB_LOCAL_PARAM_FWID: case QLC_DCB_OPER_PARAM_FWID: case 
QLC_DCB_PEER_PARAM_FWID: break; default: dev_err(dev, "Invalid parameter type %d\n", type); return -EINVAL; } addr = dma_alloc_coherent(dev, size, &cardrsp_phys_addr, GFP_KERNEL); if (addr == NULL) return -ENOMEM; prsp_le = addr; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM); if (err) goto out_free_rsp; phys_addr = cardrsp_phys_addr; cmd.req.arg[1] = size | (type << 16); cmd.req.arg[2] = MSD(phys_addr); cmd.req.arg[3] = LSD(phys_addr); err = qlcnic_issue_cmd(adapter, &cmd); if (err) { dev_err(dev, "Failed to query DCBX parameter, err %d\n", err); goto out; } memset(&rsp, 0, sizeof(struct qlcnic_dcb_param)); rsp.hdr_prio_pfc_map[0] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[0]); rsp.hdr_prio_pfc_map[1] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[1]); rsp.prio_pg_map[0] = le32_to_cpu(prsp_le->prio_pg_map[0]); rsp.prio_pg_map[1] = le32_to_cpu(prsp_le->prio_pg_map[1]); rsp.pg_bw_map[0] = le32_to_cpu(prsp_le->pg_bw_map[0]); rsp.pg_bw_map[1] = le32_to_cpu(prsp_le->pg_bw_map[1]); rsp.pg_tsa_map[0] = le32_to_cpu(prsp_le->pg_tsa_map[0]); rsp.pg_tsa_map[1] = le32_to_cpu(prsp_le->pg_tsa_map[1]); for (i = 0; i < QLC_DCB_MAX_APP; i++) rsp.app[i] = le32_to_cpu(prsp_le->app[i]); if (buf) memcpy(buf, &rsp, size); out: qlcnic_free_mbx_args(&cmd); out_free_rsp: dma_free_coherent(dev, size, addr, cardrsp_phys_addr); return err; } static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb) { struct qlcnic_dcb_mbx_params *mbx; int err; mbx = dcb->param; if (!mbx) return 0; err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[0], QLC_DCB_LOCAL_PARAM_FWID); if (err) return err; err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[1], QLC_DCB_OPER_PARAM_FWID); if (err) return err; err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[2], QLC_DCB_PEER_PARAM_FWID); if (err) return err; mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP; qlcnic_dcb_data_cee_param_map(dcb->adapter); return err; } static void qlcnic_dcb_aen_work(struct work_struct *work) { struct qlcnic_dcb *dcb; dcb = container_of(work, struct qlcnic_dcb, aen_work.work); qlcnic_dcb_get_cee_cfg(dcb); clear_bit(QLCNIC_DCB_AEN_MODE, &dcb->state); } static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data) { if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state)) return; queue_delayed_work(dcb->wq, &dcb->aen_work, 0); } static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb) { struct qlcnic_dcb_capability *cap = &dcb->cfg->capability; u32 mbx_out; int err; err = __qlcnic_dcb_get_capability(dcb, &mbx_out); if (err) return err; if (mbx_out & BIT_2) cap->dcb_capability = DCB_CAP_DCBX_VER_CEE; if (mbx_out & BIT_3) cap->dcb_capability |= DCB_CAP_DCBX_VER_IEEE; if (cap->dcb_capability) cap->dcb_capability |= DCB_CAP_DCBX_LLD_MANAGED; if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability) set_bit(QLCNIC_DCB_STATE, &dcb->state); return err; } static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 idx) { struct qlcnic_adapter *adapter = dcb->adapter; struct qlcnic_dcb_mbx_params mbx_out; int err, i, j, k, max_app, size; struct qlcnic_dcb_param *each; struct qlcnic_cmd_args cmd; u32 val; char *p; size = 0; memset(&mbx_out, 0, sizeof(struct qlcnic_dcb_mbx_params)); memset(buf, 0, sizeof(struct qlcnic_dcb_mbx_params)); err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM); if (err) return err; cmd.req.arg[0] |= QLC_DCB_FW_VER << 29; err = qlcnic_issue_cmd(adapter, &cmd); if (err) { dev_err(&adapter->pdev->dev, "Failed to query DCBX param, err %d\n", 
err); goto out; } mbx_out.prio_tc_map = cmd.rsp.arg[1]; p = memcpy(buf, &mbx_out, sizeof(u32)); k = 2; p += sizeof(u32); for (j = 0; j < QLC_DCB_NUM_PARAM; j++) { each = &mbx_out.type[j]; each->hdr_prio_pfc_map[0] = cmd.rsp.arg[k++]; each->hdr_prio_pfc_map[1] = cmd.rsp.arg[k++]; each->prio_pg_map[0] = cmd.rsp.arg[k++]; each->prio_pg_map[1] = cmd.rsp.arg[k++]; each->pg_bw_map[0] = cmd.rsp.arg[k++]; each->pg_bw_map[1] = cmd.rsp.arg[k++]; each->pg_tsa_map[0] = cmd.rsp.arg[k++]; each->pg_tsa_map[1] = cmd.rsp.arg[k++]; val = each->hdr_prio_pfc_map[0]; max_app = qlcnic_dcb_get_num_app(adapter, val); for (i = 0; i < max_app; i++) each->app[i] = cmd.rsp.arg[i + k]; size = 16 * sizeof(u32); memcpy(p, &each->hdr_prio_pfc_map[0], size); p += size; if (j == 0) k = 18; else k = 34; } out: qlcnic_free_mbx_args(&cmd); return err; } static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb) { int err; err = qlcnic_dcb_query_cee_param(dcb, (char *)dcb->param, 0); if (err) return err; qlcnic_dcb_data_cee_param_map(dcb->adapter); return err; } static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data) { u32 *val = data; if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state)) return; if (*val & BIT_8) set_bit(QLCNIC_DCB_STATE, &dcb->state); else clear_bit(QLCNIC_DCB_STATE, &dcb->state); queue_delayed_work(dcb->wq, &dcb->aen_work, 0); } static void qlcnic_dcb_fill_cee_tc_params(struct qlcnic_dcb_mbx_params *mbx, struct qlcnic_dcb_param *each, struct qlcnic_dcb_cee *type) { struct qlcnic_dcb_tc_cfg *tc_cfg; u8 i, tc, pgid; for (i = 0; i < QLC_DCB_MAX_PRIO; i++) { tc = QLC_DCB_GET_TC_PRIO(mbx->prio_tc_map, i); tc_cfg = &type->tc_cfg[tc]; tc_cfg->valid = true; tc_cfg->up_tc_map |= QLC_DCB_GET_MAP(i); if (QLC_DCB_GET_PFC_PRIO(each->hdr_prio_pfc_map[1], i) && type->pfc_mode_enable) { tc_cfg->prio_cfg[i].valid = true; tc_cfg->prio_cfg[i].pfc_type = QLC_PFC_FULL; } if (i < 4) pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[0], i); else pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[1], i); tc_cfg->pgid = pgid; tc_cfg->prio_type = QLC_PRIO_LINK; type->pg_cfg[tc_cfg->pgid].prio_count++; } } static void qlcnic_dcb_fill_cee_pg_params(struct qlcnic_dcb_param *each, struct qlcnic_dcb_cee *type) { struct qlcnic_dcb_pg_cfg *pg_cfg; u8 i, tsa, bw_per; for (i = 0; i < QLC_DCB_MAX_PG; i++) { pg_cfg = &type->pg_cfg[i]; pg_cfg->valid = true; if (i < 4) { bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[0], i); tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[0], i); } else { bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[1], i); tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[1], i); } pg_cfg->total_bw_percent = bw_per; pg_cfg->tsa_type = tsa; } } static void qlcnic_dcb_fill_cee_app_params(struct qlcnic_adapter *adapter, u8 idx, struct qlcnic_dcb_param *each, struct qlcnic_dcb_cee *type) { struct qlcnic_dcb_app *app; u8 i, num_app, map, cnt; struct dcb_app new_app; num_app = qlcnic_dcb_get_num_app(adapter, each->hdr_prio_pfc_map[0]); for (i = 0; i < num_app; i++) { app = &type->app[i]; app->valid = true; /* Only for CEE (-1) */ app->selector = QLC_DCB_GET_SELECTOR_APP(each->app[i]) - 1; new_app.selector = app->selector; app->protocol = QLC_DCB_GET_PROTO_ID_APP(each->app[i]); new_app.protocol = app->protocol; map = qlcnic_dcb_get_prio_map_app(adapter, each->app[i]); cnt = qlcnic_dcb_prio_count(map); if (cnt >= QLC_DCB_MAX_TC) cnt = 0; app->priority = cnt; new_app.priority = cnt; if (idx == QLC_DCB_OPER_IDX && adapter->netdev->dcbnl_ops) dcb_setapp(adapter->netdev, &new_app); } } static void qlcnic_dcb_map_cee_params(struct 
qlcnic_adapter *adapter, u8 idx) { struct qlcnic_dcb_mbx_params *mbx = adapter->dcb->param; struct qlcnic_dcb_param *each = &mbx->type[idx]; struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; struct qlcnic_dcb_cee *type = &cfg->type[idx]; type->tc_param_valid = false; type->pfc_mode_enable = false; memset(type->tc_cfg, 0, sizeof(struct qlcnic_dcb_tc_cfg) * QLC_DCB_MAX_TC); memset(type->pg_cfg, 0, sizeof(struct qlcnic_dcb_pg_cfg) * QLC_DCB_MAX_TC); if (qlcnic_dcb_pfc_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) && cfg->capability.max_pfc_tc) type->pfc_mode_enable = true; if (qlcnic_dcb_tsa_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) && cfg->capability.max_ets_tc) type->tc_param_valid = true; qlcnic_dcb_fill_cee_tc_params(mbx, each, type); qlcnic_dcb_fill_cee_pg_params(each, type); qlcnic_dcb_fill_cee_app_params(adapter, idx, each, type); } static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *adapter) { int i; for (i = 0; i < QLC_DCB_NUM_PARAM; i++) qlcnic_dcb_map_cee_params(adapter, i); dcbnl_cee_notify(adapter->netdev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0); } static u8 qlcnic_dcb_get_state(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); return test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state); } static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr) { memcpy(addr, netdev->perm_addr, netdev->addr_len); } static void qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio, u8 *pgid, u8 *bw_per, u8 *up_tc_map) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_tc_cfg *tc_cfg, *temp; struct qlcnic_dcb_cee *type; u8 i, cnt, pg; type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; *prio = *pgid = *bw_per = *up_tc_map = 0; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) || !type->tc_param_valid) return; if (tc < 0 || (tc >= QLC_DCB_MAX_TC)) return; tc_cfg = &type->tc_cfg[tc]; if (!tc_cfg->valid) return; *pgid = tc_cfg->pgid; *prio = tc_cfg->prio_type; *up_tc_map = tc_cfg->up_tc_map; pg = *pgid; for (i = 0, cnt = 0; i < QLC_DCB_MAX_TC; i++) { temp = &type->tc_cfg[i]; if (temp->valid && (pg == temp->pgid)) cnt++; } tc_cfg->bwg_percent = (100 / cnt); *bw_per = tc_cfg->bwg_percent; } static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 *bw_pct) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_pg_cfg *pgcfg; struct qlcnic_dcb_cee *type; *bw_pct = 0; type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) || !type->tc_param_valid) return; if (pgid < 0 || pgid >= QLC_DCB_MAX_PG) return; pgcfg = &type->pg_cfg[pgid]; if (!pgcfg->valid) return; *bw_pct = pgcfg->total_bw_percent; } static void qlcnic_dcb_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_tc_cfg *tc_cfg; u8 val = QLC_DCB_GET_MAP(prio); struct qlcnic_dcb_cee *type; u8 i; *setting = 0; type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) || !type->pfc_mode_enable) return; for (i = 0; i < QLC_DCB_MAX_TC; i++) { tc_cfg = &type->tc_cfg[i]; if (!tc_cfg->valid) continue; if ((val & tc_cfg->up_tc_map) && (tc_cfg->prio_cfg[prio].valid)) *setting = tc_cfg->prio_cfg[prio].pfc_type; } } static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid, u8 *cap) { struct qlcnic_adapter *adapter = netdev_priv(netdev); if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return 1; switch (capid) { case 
DCB_CAP_ATTR_PG: case DCB_CAP_ATTR_UP2TC: case DCB_CAP_ATTR_PFC: case DCB_CAP_ATTR_GSP: *cap = true; break; case DCB_CAP_ATTR_PG_TCS: case DCB_CAP_ATTR_PFC_TCS: *cap = 0x80; /* 8 priorities for PGs */ break; case DCB_CAP_ATTR_DCBX: *cap = adapter->dcb->cfg->capability.dcb_capability; break; default: *cap = false; } return 0; } static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return -EINVAL; switch (attr) { case DCB_NUMTCS_ATTR_PG: *num = cfg->capability.max_ets_tc; return 0; case DCB_NUMTCS_ATTR_PFC: *num = cfg->capability.max_pfc_tc; return 0; default: return -EINVAL; } } static int qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct dcb_app app = { .selector = idtype, .protocol = id, }; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return -EINVAL; return dcb_getapp(netdev, &app); } static u8 qlcnic_dcb_get_pfc_state(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb *dcb = adapter->dcb; if (!test_bit(QLCNIC_DCB_STATE, &dcb->state)) return 0; return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable; } static u8 qlcnic_dcb_get_dcbx(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return 0; return cfg->capability.dcb_capability; } static u8 qlcnic_dcb_get_feat_cfg(struct net_device *netdev, int fid, u8 *flag) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_cee *type; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return 1; type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; *flag = 0; switch (fid) { case DCB_FEATCFG_ATTR_PG: if (type->tc_param_valid) *flag |= DCB_FEATCFG_ENABLE; else *flag |= DCB_FEATCFG_ERROR; break; case DCB_FEATCFG_ATTR_PFC: if (type->pfc_mode_enable) { if (type->tc_cfg[0].prio_cfg[0].pfc_type) *flag |= DCB_FEATCFG_ENABLE; } else { *flag |= DCB_FEATCFG_ERROR; } break; case DCB_FEATCFG_ATTR_APP: *flag |= DCB_FEATCFG_ENABLE; break; default: netdev_err(netdev, "Invalid Feature ID %d\n", fid); return 1; } return 0; } static inline void qlcnic_dcb_get_pg_tc_cfg_rx(struct net_device *netdev, int prio, u8 *prio_type, u8 *pgid, u8 *bw_pct, u8 *up_map) { *prio_type = *pgid = *bw_pct = *up_map = 0; } static inline void qlcnic_dcb_get_pg_bwg_cfg_rx(struct net_device *netdev, int pgid, u8 *bw_pct) { *bw_pct = 0; } static int qlcnic_dcb_peer_app_info(struct net_device *netdev, struct dcb_peer_app_info *info, u16 *app_count) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_cee *peer; int i; memset(info, 0, sizeof(*info)); *app_count = 0; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return 0; peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX]; for (i = 0; i < QLC_DCB_MAX_APP; i++) { if (peer->app[i].valid) (*app_count)++; } return 0; } static int qlcnic_dcb_peer_app_table(struct net_device *netdev, struct dcb_app *table) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_cee *peer; struct qlcnic_dcb_app *app; int i, j; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return 0; peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX]; for (i = 0, j = 0; i < QLC_DCB_MAX_APP; i++) { app = &peer->app[i]; if (!app->valid) continue; table[j].selector = 
app->selector; table[j].priority = app->priority; table[j++].protocol = app->protocol; } return 0; } static int qlcnic_dcb_cee_peer_get_pg(struct net_device *netdev, struct cee_pg *pg) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_cee *peer; u8 i, j, k, map; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return 0; peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX]; for (i = 0, j = 0; i < QLC_DCB_MAX_PG; i++) { if (!peer->pg_cfg[i].valid) continue; pg->pg_bw[j] = peer->pg_cfg[i].total_bw_percent; for (k = 0; k < QLC_DCB_MAX_TC; k++) { if (peer->tc_cfg[i].valid && (peer->tc_cfg[i].pgid == i)) { map = peer->tc_cfg[i].up_tc_map; pg->prio_pg[j++] = map; break; } } } return 0; } static int qlcnic_dcb_cee_peer_get_pfc(struct net_device *netdev, struct cee_pfc *pfc) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; struct qlcnic_dcb_tc_cfg *tc; struct qlcnic_dcb_cee *peer; u8 i, setting, prio; pfc->pfc_en = 0; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) return 0; peer = &cfg->type[QLC_DCB_PEER_IDX]; for (i = 0; i < QLC_DCB_MAX_TC; i++) { tc = &peer->tc_cfg[i]; prio = qlcnic_dcb_prio_count(tc->up_tc_map); setting = 0; qlcnic_dcb_get_pfc_cfg(netdev, prio, &setting); if (setting) pfc->pfc_en |= QLC_DCB_GET_MAP(i); } pfc->tcs_supported = cfg->capability.max_pfc_tc; return 0; } static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops = { .getstate = qlcnic_dcb_get_state, .getpermhwaddr = qlcnic_dcb_get_perm_hw_addr, .getpgtccfgtx = qlcnic_dcb_get_pg_tc_cfg_tx, .getpgbwgcfgtx = qlcnic_dcb_get_pg_bwg_cfg_tx, .getpfccfg = qlcnic_dcb_get_pfc_cfg, .getcap = qlcnic_dcb_get_capability, .getnumtcs = qlcnic_dcb_get_num_tcs, .getapp = qlcnic_dcb_get_app, .getpfcstate = qlcnic_dcb_get_pfc_state, .getdcbx = qlcnic_dcb_get_dcbx, .getfeatcfg = qlcnic_dcb_get_feat_cfg, .getpgtccfgrx = qlcnic_dcb_get_pg_tc_cfg_rx, .getpgbwgcfgrx = qlcnic_dcb_get_pg_bwg_cfg_rx, .peer_getappinfo = qlcnic_dcb_peer_app_info, .peer_getapptable = qlcnic_dcb_peer_app_table, .cee_peer_getpg = qlcnic_dcb_cee_peer_get_pg, .cee_peer_getpfc = qlcnic_dcb_cee_peer_get_pfc, };
linux-master
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
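Editor's aside after the qlcnic_dcb.c listing: the CEE peer PFC handler above folds per-traffic-class flags into a single pfc_en bitmap. The standalone sketch below reproduces that folding pattern under the assumption that QLC_DCB_GET_MAP(i) reduces to a one-hot bit (1 << i); the struct layout and sample values are illustrative only and are not taken from the driver.

/*
 * Editor's sketch -- not part of qlcnic_dcb.c. Minimal, self-contained
 * userspace C showing the same "per-TC flag -> CEE pfc_en bitmap" folding
 * used by qlcnic_dcb_cee_peer_get_pfc() above, assuming QLC_DCB_GET_MAP(i)
 * is a one-hot bit.
 */
#include <stdio.h>

#define MAX_TC 8

struct tc_pfc {
	int valid;   /* traffic class advertised by the peer */
	int pfc_on;  /* PFC requested for this traffic class */
};

/* Build a CEE-style pfc_en bitmap: bit i set => PFC enabled for TC i. */
static unsigned char build_pfc_en(const struct tc_pfc tc[MAX_TC])
{
	unsigned char pfc_en = 0;
	int i;

	for (i = 0; i < MAX_TC; i++)
		if (tc[i].valid && tc[i].pfc_on)
			pfc_en |= 1u << i;  /* analogous to QLC_DCB_GET_MAP(i) */

	return pfc_en;
}

int main(void)
{
	/* Example: PFC requested on traffic classes 3 and 4 only. */
	struct tc_pfc tc[MAX_TC] = {
		[3] = { .valid = 1, .pfc_on = 1 },
		[4] = { .valid = 1, .pfc_on = 1 },
	};

	printf("pfc_en = 0x%02x\n", build_pfc_en(tc)); /* prints 0x18 */
	return 0;
}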
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic qlcnic NIC Driver * Copyright (c) 2009-2013 QLogic Corporation */ #include <linux/types.h> #include "qlcnic_sriov.h" #include "qlcnic.h" #define QLCNIC_SRIOV_VF_MAX_MAC 7 #define QLC_VF_MIN_TX_RATE 100 #define QLC_VF_MAX_TX_RATE 9999 #define QLC_MAC_OPCODE_MASK 0x7 #define QLC_VF_FLOOD_BIT BIT_16 #define QLC_FLOOD_MODE 0x5 #define QLC_SRIOV_ALLOW_VLAN0 BIT_19 #define QLC_INTR_COAL_TYPE_MASK 0x7 static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8); struct qlcnic_sriov_cmd_handler { int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *); }; struct qlcnic_sriov_fw_cmd_handler { u32 cmd; int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *); }; static int qlcnic_sriov_pf_set_vport_info(struct qlcnic_adapter *adapter, struct qlcnic_info *npar_info, u16 vport_id) { struct qlcnic_cmd_args cmd; int err; if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO)) return -ENOMEM; cmd.req.arg[1] = (vport_id << 16) | 0x1; cmd.req.arg[2] = npar_info->bit_offsets; cmd.req.arg[2] |= npar_info->min_tx_bw << 16; cmd.req.arg[3] = npar_info->max_tx_bw | (npar_info->max_tx_ques << 16); cmd.req.arg[4] = npar_info->max_tx_mac_filters; cmd.req.arg[4] |= npar_info->max_rx_mcast_mac_filters << 16; cmd.req.arg[5] = npar_info->max_rx_ucast_mac_filters | (npar_info->max_rx_ip_addr << 16); cmd.req.arg[6] = npar_info->max_rx_lro_flow | (npar_info->max_rx_status_rings << 16); cmd.req.arg[7] = npar_info->max_rx_buf_rings | (npar_info->max_rx_ques << 16); cmd.req.arg[8] = npar_info->max_tx_vlan_keys; cmd.req.arg[8] |= npar_info->max_local_ipv6_addrs << 16; cmd.req.arg[9] = npar_info->max_remote_ipv6_addrs; err = qlcnic_issue_cmd(adapter, &cmd); if (err) dev_err(&adapter->pdev->dev, "Failed to set vport info, err=%d\n", err); qlcnic_free_mbx_args(&cmd); return err; } static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter, struct qlcnic_info *info, u16 func) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_resources *res = &sriov->ff_max; u16 num_macs = sriov->num_allowed_vlans + 1; int ret = -EIO, vpid, id; struct qlcnic_vport *vp; u32 num_vfs, max, temp; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func); if (vpid < 0) return -EINVAL; num_vfs = sriov->num_vfs; max = num_vfs + 1; info->bit_offsets = 0xffff; info->max_tx_ques = res->num_tx_queues / max; if (qlcnic_83xx_pf_check(adapter)) num_macs = QLCNIC_83XX_SRIOV_VF_MAX_MAC; info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters; if (adapter->ahw->pci_func == func) { info->min_tx_bw = 0; info->max_tx_bw = MAX_BW; temp = res->num_rx_ucast_mac_filters - num_macs * num_vfs; info->max_rx_ucast_mac_filters = temp; temp = res->num_tx_mac_filters - num_macs * num_vfs; info->max_tx_mac_filters = temp; temp = num_macs * num_vfs * QLCNIC_SRIOV_VF_MAX_MAC; temp = res->num_rx_mcast_mac_filters - temp; info->max_rx_mcast_mac_filters = temp; info->max_tx_ques = res->num_tx_queues - sriov->num_vfs; } else { id = qlcnic_sriov_func_to_index(adapter, func); if (id < 0) return id; vp = sriov->vf_info[id].vp; info->min_tx_bw = vp->min_tx_bw; info->max_tx_bw = vp->max_tx_bw; info->max_rx_ucast_mac_filters = num_macs; info->max_tx_mac_filters = num_macs; temp = num_macs * QLCNIC_SRIOV_VF_MAX_MAC; info->max_rx_mcast_mac_filters = temp; info->max_tx_ques = QLCNIC_SINGLE_RING; } info->max_rx_ip_addr = res->num_destip / max; info->max_rx_status_rings = res->num_rx_status_rings / max; info->max_rx_buf_rings = res->num_rx_buf_rings / max; info->max_rx_ques = 
res->num_rx_queues / max; info->max_rx_lro_flow = res->num_lro_flows_supported / max; info->max_tx_vlan_keys = res->num_txvlan_keys; info->max_local_ipv6_addrs = res->max_local_ipv6_addrs; info->max_remote_ipv6_addrs = res->max_remote_ipv6_addrs; ret = qlcnic_sriov_pf_set_vport_info(adapter, info, vpid); if (ret) return ret; return 0; } static void qlcnic_sriov_pf_set_ff_max_res(struct qlcnic_adapter *adapter, struct qlcnic_info *info) { struct qlcnic_resources *ff_max = &adapter->ahw->sriov->ff_max; ff_max->num_tx_mac_filters = info->max_tx_mac_filters; ff_max->num_rx_ucast_mac_filters = info->max_rx_ucast_mac_filters; ff_max->num_rx_mcast_mac_filters = info->max_rx_mcast_mac_filters; ff_max->num_txvlan_keys = info->max_tx_vlan_keys; ff_max->num_rx_queues = info->max_rx_ques; ff_max->num_tx_queues = info->max_tx_ques; ff_max->num_lro_flows_supported = info->max_rx_lro_flow; ff_max->num_destip = info->max_rx_ip_addr; ff_max->num_rx_buf_rings = info->max_rx_buf_rings; ff_max->num_rx_status_rings = info->max_rx_status_rings; ff_max->max_remote_ipv6_addrs = info->max_remote_ipv6_addrs; ff_max->max_local_ipv6_addrs = info->max_local_ipv6_addrs; } static void qlcnic_sriov_set_vf_max_vlan(struct qlcnic_adapter *adapter, struct qlcnic_info *npar_info) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; int temp, total_fn; temp = npar_info->max_rx_mcast_mac_filters; total_fn = sriov->num_vfs + 1; temp = temp / (QLCNIC_SRIOV_VF_MAX_MAC * total_fn); sriov->num_allowed_vlans = temp - 1; if (qlcnic_83xx_pf_check(adapter)) sriov->num_allowed_vlans = 1; netdev_info(adapter->netdev, "Max Guest VLANs supported per VF = %d\n", sriov->num_allowed_vlans); } static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter, struct qlcnic_info *npar_info) { int err; struct qlcnic_cmd_args cmd; if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO)) return -ENOMEM; cmd.req.arg[1] = 0x2; err = qlcnic_issue_cmd(adapter, &cmd); if (err) { dev_err(&adapter->pdev->dev, "Failed to get PF info, err=%d\n", err); goto out; } npar_info->total_pf = cmd.rsp.arg[2] & 0xff; npar_info->total_rss_engines = (cmd.rsp.arg[2] >> 8) & 0xff; npar_info->max_vports = MSW(cmd.rsp.arg[2]); npar_info->max_tx_ques = LSW(cmd.rsp.arg[3]); npar_info->max_tx_mac_filters = MSW(cmd.rsp.arg[3]); npar_info->max_rx_mcast_mac_filters = LSW(cmd.rsp.arg[4]); npar_info->max_rx_ucast_mac_filters = MSW(cmd.rsp.arg[4]); npar_info->max_rx_ip_addr = LSW(cmd.rsp.arg[5]); npar_info->max_rx_lro_flow = MSW(cmd.rsp.arg[5]); npar_info->max_rx_status_rings = LSW(cmd.rsp.arg[6]); npar_info->max_rx_buf_rings = MSW(cmd.rsp.arg[6]); npar_info->max_rx_ques = LSW(cmd.rsp.arg[7]); npar_info->max_tx_vlan_keys = MSW(cmd.rsp.arg[7]); npar_info->max_local_ipv6_addrs = LSW(cmd.rsp.arg[8]); npar_info->max_remote_ipv6_addrs = MSW(cmd.rsp.arg[8]); qlcnic_sriov_set_vf_max_vlan(adapter, npar_info); qlcnic_sriov_pf_set_ff_max_res(adapter, npar_info); dev_info(&adapter->pdev->dev, "\n\ttotal_pf: %d,\n" "\n\ttotal_rss_engines: %d max_vports: %d max_tx_ques %d,\n" "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n" "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n" "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n" "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n" "\tmax_local_ipv6_addrs: %d, max_remote_ipv6_addrs: %d\n", npar_info->total_pf, npar_info->total_rss_engines, npar_info->max_vports, npar_info->max_tx_ques, npar_info->max_tx_mac_filters, npar_info->max_rx_mcast_mac_filters, npar_info->max_rx_ucast_mac_filters, 
npar_info->max_rx_ip_addr, npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings, npar_info->max_rx_buf_rings, npar_info->max_rx_ques, npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs, npar_info->max_remote_ipv6_addrs); out: qlcnic_free_mbx_args(&cmd); return err; } static void qlcnic_sriov_pf_reset_vport_handle(struct qlcnic_adapter *adapter, u8 func) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_vport *vp; int index; if (adapter->ahw->pci_func == func) { sriov->vp_handle = 0; } else { index = qlcnic_sriov_func_to_index(adapter, func); if (index < 0) return; vp = sriov->vf_info[index].vp; vp->handle = 0; } } static void qlcnic_sriov_pf_set_vport_handle(struct qlcnic_adapter *adapter, u16 vport_handle, u8 func) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_vport *vp; int index; if (adapter->ahw->pci_func == func) { sriov->vp_handle = vport_handle; } else { index = qlcnic_sriov_func_to_index(adapter, func); if (index < 0) return; vp = sriov->vf_info[index].vp; vp->handle = vport_handle; } } static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *adapter, u8 func) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_vf_info *vf_info; int index; if (adapter->ahw->pci_func == func) { return sriov->vp_handle; } else { index = qlcnic_sriov_func_to_index(adapter, func); if (index >= 0) { vf_info = &sriov->vf_info[index]; return vf_info->vp->handle; } } return -EINVAL; } static int qlcnic_sriov_pf_config_vport(struct qlcnic_adapter *adapter, u8 flag, u16 func) { struct qlcnic_cmd_args cmd; int ret; int vpid; if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_VPORT)) return -ENOMEM; if (flag) { cmd.req.arg[3] = func << 8; } else { vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func); if (vpid < 0) { ret = -EINVAL; goto out; } cmd.req.arg[3] = ((vpid & 0xffff) << 8) | 1; } ret = qlcnic_issue_cmd(adapter, &cmd); if (ret) { dev_err(&adapter->pdev->dev, "Failed %s vport, err %d for func 0x%x\n", (flag ? 
"enable" : "disable"), ret, func); goto out; } if (flag) { vpid = cmd.rsp.arg[2] & 0xffff; qlcnic_sriov_pf_set_vport_handle(adapter, vpid, func); } else { qlcnic_sriov_pf_reset_vport_handle(adapter, func); } out: qlcnic_free_mbx_args(&cmd); return ret; } static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter, u8 enable) { struct qlcnic_cmd_args cmd; int err; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); if (err) return err; cmd.req.arg[1] = 0x4; if (enable) { adapter->flags |= QLCNIC_VLAN_FILTERING; cmd.req.arg[1] |= BIT_16; if (qlcnic_84xx_check(adapter)) cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0; } else { adapter->flags &= ~QLCNIC_VLAN_FILTERING; } err = qlcnic_issue_cmd(adapter, &cmd); if (err) dev_err(&adapter->pdev->dev, "Failed to configure VLAN filtering, err=%d\n", err); qlcnic_free_mbx_args(&cmd); return err; } /* On configuring VF flood bit, PFD will receive traffic from all VFs */ static int qlcnic_sriov_pf_cfg_flood(struct qlcnic_adapter *adapter) { struct qlcnic_cmd_args cmd; int err; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); if (err) return err; cmd.req.arg[1] = QLC_FLOOD_MODE | QLC_VF_FLOOD_BIT; err = qlcnic_issue_cmd(adapter, &cmd); if (err) dev_err(&adapter->pdev->dev, "Failed to configure VF Flood bit on PF, err=%d\n", err); qlcnic_free_mbx_args(&cmd); return err; } static int qlcnic_sriov_pf_cfg_eswitch(struct qlcnic_adapter *adapter, u8 func, u8 enable) { struct qlcnic_cmd_args cmd; int err = -EIO; if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH)) return -ENOMEM; cmd.req.arg[0] |= (3 << 29); cmd.req.arg[1] = ((func & 0xf) << 2) | BIT_6 | BIT_1; if (enable) cmd.req.arg[1] |= BIT_0; err = qlcnic_issue_cmd(adapter, &cmd); if (err != QLCNIC_RCODE_SUCCESS) { dev_err(&adapter->pdev->dev, "Failed to enable sriov eswitch%d\n", err); err = -EIO; } qlcnic_free_mbx_args(&cmd); return err; } static void qlcnic_sriov_pf_del_flr_queue(struct qlcnic_adapter *adapter) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_back_channel *bc = &sriov->bc; int i; for (i = 0; i < sriov->num_vfs; i++) cancel_work_sync(&sriov->vf_info[i].flr_work); destroy_workqueue(bc->bc_flr_wq); } static int qlcnic_sriov_pf_create_flr_queue(struct qlcnic_adapter *adapter) { struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc; struct workqueue_struct *wq; wq = create_singlethread_workqueue("qlcnic-flr"); if (wq == NULL) { dev_err(&adapter->pdev->dev, "Cannot create FLR workqueue\n"); return -ENOMEM; } bc->bc_flr_wq = wq; return 0; } void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter) { u8 func = adapter->ahw->pci_func; if (!qlcnic_sriov_enable_check(adapter)) return; qlcnic_sriov_pf_del_flr_queue(adapter); qlcnic_sriov_cfg_bc_intr(adapter, 0); qlcnic_sriov_pf_config_vport(adapter, 0, func); qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0); qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 0); __qlcnic_sriov_cleanup(adapter); adapter->ahw->op_mode = QLCNIC_MGMT_FUNC; clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state); } void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) { if (!qlcnic_sriov_pf_check(adapter)) return; if (!qlcnic_sriov_enable_check(adapter)) return; pci_disable_sriov(adapter->pdev); netdev_info(adapter->netdev, "SR-IOV is disabled successfully on port %d\n", adapter->portnum); } static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter) { struct net_device *netdev = adapter->netdev; if (pci_vfs_assigned(adapter->pdev)) { netdev_err(adapter->netdev, "SR-IOV VFs belonging 
to port %d are assigned to VMs. SR-IOV can not be disabled on this port\n", adapter->portnum); netdev_info(adapter->netdev, "Please detach SR-IOV VFs belonging to port %d from VMs, and then try to disable SR-IOV on this port\n", adapter->portnum); return -EPERM; } qlcnic_sriov_pf_disable(adapter); rtnl_lock(); if (netif_running(netdev)) __qlcnic_down(adapter, netdev); qlcnic_sriov_free_vlans(adapter); qlcnic_sriov_pf_cleanup(adapter); /* After disabling SRIOV re-init the driver in default mode configure opmode based on op_mode of function */ if (qlcnic_83xx_configure_opmode(adapter)) { rtnl_unlock(); return -EIO; } if (netif_running(netdev)) __qlcnic_up(adapter, netdev); rtnl_unlock(); return 0; } static int qlcnic_sriov_pf_init(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_info nic_info, pf_info, vp_info; int err; u8 func = ahw->pci_func; if (!qlcnic_sriov_enable_check(adapter)) return 0; err = qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 1); if (err) return err; if (qlcnic_84xx_check(adapter)) { err = qlcnic_sriov_pf_cfg_flood(adapter); if (err) goto disable_vlan_filtering; } err = qlcnic_sriov_pf_cfg_eswitch(adapter, func, 1); if (err) goto disable_vlan_filtering; err = qlcnic_sriov_pf_config_vport(adapter, 1, func); if (err) goto disable_eswitch; err = qlcnic_sriov_get_pf_info(adapter, &pf_info); if (err) goto delete_vport; err = qlcnic_get_nic_info(adapter, &nic_info, func); if (err) goto delete_vport; err = qlcnic_sriov_pf_cal_res_limit(adapter, &vp_info, func); if (err) goto delete_vport; err = qlcnic_sriov_cfg_bc_intr(adapter, 1); if (err) goto delete_vport; ahw->physical_port = (u8) nic_info.phys_port; ahw->switch_mode = nic_info.switch_mode; ahw->max_mtu = nic_info.max_mtu; ahw->capabilities = nic_info.capabilities; ahw->nic_mode = QLC_83XX_SRIOV_MODE; return err; delete_vport: qlcnic_sriov_pf_config_vport(adapter, 0, func); disable_eswitch: qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0); disable_vlan_filtering: qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 0); return err; } static int qlcnic_sriov_pf_enable(struct qlcnic_adapter *adapter, int num_vfs) { int err; if (!qlcnic_sriov_enable_check(adapter)) return 0; err = pci_enable_sriov(adapter->pdev, num_vfs); if (err) qlcnic_sriov_pf_cleanup(adapter); return err; } static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs) { int err = 0; set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state); adapter->ahw->op_mode = QLCNIC_SRIOV_PF_FUNC; err = qlcnic_sriov_init(adapter, num_vfs); if (err) goto clear_op_mode; err = qlcnic_sriov_pf_create_flr_queue(adapter); if (err) goto sriov_cleanup; err = qlcnic_sriov_pf_init(adapter); if (err) goto del_flr_queue; err = qlcnic_sriov_alloc_vlans(adapter); if (err) goto del_flr_queue; return err; del_flr_queue: qlcnic_sriov_pf_del_flr_queue(adapter); sriov_cleanup: __qlcnic_sriov_cleanup(adapter); clear_op_mode: clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state); adapter->ahw->op_mode = QLCNIC_MGMT_FUNC; return err; } static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs) { struct net_device *netdev = adapter->netdev; int err; if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { netdev_err(netdev, "SR-IOV cannot be enabled, when legacy interrupts are enabled\n"); return -EIO; } rtnl_lock(); if (netif_running(netdev)) __qlcnic_down(adapter, netdev); err = __qlcnic_pci_sriov_enable(adapter, num_vfs); if (err) goto error; if (netif_running(netdev)) __qlcnic_up(adapter, netdev); rtnl_unlock(); err = 
qlcnic_sriov_pf_enable(adapter, num_vfs); if (!err) { netdev_info(netdev, "SR-IOV is enabled successfully on port %d\n", adapter->portnum); /* Return number of vfs enabled */ return num_vfs; } rtnl_lock(); if (netif_running(netdev)) __qlcnic_down(adapter, netdev); error: if (!qlcnic_83xx_configure_opmode(adapter)) { if (netif_running(netdev)) __qlcnic_up(adapter, netdev); } rtnl_unlock(); netdev_info(netdev, "Failed to enable SR-IOV on port %d\n", adapter->portnum); return err; } int qlcnic_pci_sriov_configure(struct pci_dev *dev, int num_vfs) { struct qlcnic_adapter *adapter = pci_get_drvdata(dev); int err; if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) return -EBUSY; if (num_vfs == 0) err = qlcnic_pci_sriov_disable(adapter); else err = qlcnic_pci_sriov_enable(adapter, num_vfs); clear_bit(__QLCNIC_RESETTING, &adapter->state); return err; } static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func) { struct qlcnic_cmd_args cmd; struct qlcnic_vport *vp; int err, id; u8 *mac; id = qlcnic_sriov_func_to_index(adapter, func); if (id < 0) return id; vp = adapter->ahw->sriov->vf_info[id].vp; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); if (err) return err; cmd.req.arg[1] = 0x3 | func << 16; if (vp->spoofchk == true) { mac = vp->mac; cmd.req.arg[2] |= BIT_1 | BIT_3 | BIT_8; cmd.req.arg[4] = mac[5] | mac[4] << 8 | mac[3] << 16 | mac[2] << 24; cmd.req.arg[5] = mac[1] | mac[0] << 8; } if (vp->vlan_mode == QLC_PVID_MODE) { cmd.req.arg[2] |= BIT_6; cmd.req.arg[3] |= vp->pvid << 8; } err = qlcnic_issue_cmd(adapter, &cmd); if (err) dev_err(&adapter->pdev->dev, "Failed to set ACL, err=%d\n", err); qlcnic_free_mbx_args(&cmd); return err; } static int qlcnic_sriov_set_vf_vport_info(struct qlcnic_adapter *adapter, u16 func) { struct qlcnic_info defvp_info; int err; err = qlcnic_sriov_pf_cal_res_limit(adapter, &defvp_info, func); if (err) return -EIO; err = qlcnic_sriov_set_vf_acl(adapter, func); if (err) return err; return 0; } static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_vport *vp = vf->vp; struct qlcnic_adapter *adapter; struct qlcnic_sriov *sriov; u16 func = vf->pci_func; size_t size; int err; adapter = vf->adapter; sriov = adapter->ahw->sriov; if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) { err = qlcnic_sriov_pf_config_vport(adapter, 1, func); if (!err) { err = qlcnic_sriov_set_vf_vport_info(adapter, func); if (err) qlcnic_sriov_pf_config_vport(adapter, 0, func); } } else { if (vp->vlan_mode == QLC_GUEST_VLAN_MODE) { size = sizeof(*vf->sriov_vlans); size = size * sriov->num_allowed_vlans; memset(vf->sriov_vlans, 0, size); } err = qlcnic_sriov_pf_config_vport(adapter, 0, func); } if (err) goto err_out; cmd->rsp.arg[0] |= (1 << 25); if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) set_bit(QLC_BC_VF_STATE, &vf->state); else clear_bit(QLC_BC_VF_STATE, &vf->state); return err; err_out: cmd->rsp.arg[0] |= (2 << 25); return err; } static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter, struct qlcnic_vf_info *vf, u16 vlan, u8 op) { struct qlcnic_cmd_args *cmd; struct qlcnic_macvlan_mbx mv; struct qlcnic_vport *vp; u8 *addr; int err; u32 *buf; int vpid; vp = vf->vp; cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); if (!cmd) return -ENOMEM; err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN); if (err) goto free_cmd; cmd->type = QLC_83XX_MBX_CMD_NO_WAIT; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, 
vf->pci_func); if (vpid < 0) { err = -EINVAL; goto free_args; } if (vlan) op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL); cmd->req.arg[1] = op | (1 << 8) | (3 << 6); cmd->req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31; addr = vp->mac; mv.vlan = vlan; mv.mac_addr0 = addr[0]; mv.mac_addr1 = addr[1]; mv.mac_addr2 = addr[2]; mv.mac_addr3 = addr[3]; mv.mac_addr4 = addr[4]; mv.mac_addr5 = addr[5]; buf = &cmd->req.arg[2]; memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx)); err = qlcnic_issue_cmd(adapter, cmd); if (!err) return err; free_args: qlcnic_free_mbx_args(cmd); free_cmd: kfree(cmd); return err; } static int qlcnic_sriov_validate_create_rx_ctx(struct qlcnic_cmd_args *cmd) { if ((cmd->req.arg[0] >> 29) != 0x3) return -EINVAL; return 0; } static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter, struct qlcnic_vf_info *vf, int opcode) { struct qlcnic_sriov *sriov; u16 vlan; int i; sriov = adapter->ahw->sriov; spin_lock_bh(&vf->vlan_list_lock); if (vf->num_vlan) { for (i = 0; i < sriov->num_allowed_vlans; i++) { vlan = vf->sriov_vlans[i]; if (vlan) qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, opcode); } } spin_unlock_bh(&vf->vlan_list_lock); if (vf->vp->vlan_mode != QLC_PVID_MODE) { if (qlcnic_83xx_pf_check(adapter) && qlcnic_sriov_check_any_vlan(vf)) return; qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0, opcode); } } static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = tran->vf; struct qlcnic_adapter *adapter = vf->adapter; struct qlcnic_rcv_mbx_out *mbx_out; int err; err = qlcnic_sriov_validate_create_rx_ctx(cmd); if (err) { cmd->rsp.arg[0] |= (0x6 << 25); return err; } cmd->req.arg[6] = vf->vp->handle; err = qlcnic_issue_cmd(adapter, cmd); if (!err) { mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd->rsp.arg[1]; vf->rx_ctx_id = mbx_out->ctx_id; qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_ADD); } else { vf->rx_ctx_id = 0; } return err; } static int qlcnic_sriov_pf_mac_address_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; u8 type, *mac; type = cmd->req.arg[1]; switch (type) { case QLCNIC_SET_STATION_MAC: case QLCNIC_SET_FAC_DEF_MAC: cmd->rsp.arg[0] = (2 << 25); break; case QLCNIC_GET_CURRENT_MAC: cmd->rsp.arg[0] = (1 << 25); mac = vf->vp->mac; cmd->rsp.arg[2] = mac[1] | ((mac[0] << 8) & 0xff00); cmd->rsp.arg[1] = mac[5] | ((mac[4] << 8) & 0xff00) | ((mac[3]) << 16 & 0xff0000) | ((mac[2]) << 24 & 0xff000000); } return 0; } static int qlcnic_sriov_validate_create_tx_ctx(struct qlcnic_cmd_args *cmd) { if ((cmd->req.arg[0] >> 29) != 0x3) return -EINVAL; return 0; } static int qlcnic_sriov_pf_create_tx_ctx_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; struct qlcnic_tx_mbx_out *mbx_out; int err; err = qlcnic_sriov_validate_create_tx_ctx(cmd); if (err) { cmd->rsp.arg[0] |= (0x6 << 25); return err; } cmd->req.arg[5] |= vf->vp->handle << 16; err = qlcnic_issue_cmd(adapter, cmd); if (!err) { mbx_out = (struct qlcnic_tx_mbx_out *)&cmd->rsp.arg[2]; vf->tx_ctx_id = mbx_out->ctx_id; } else { vf->tx_ctx_id = 0; } return err; } static int qlcnic_sriov_validate_del_rx_ctx(struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { if ((cmd->req.arg[0] >> 29) != 0x3) return -EINVAL; if ((cmd->req.arg[1] & 0xffff) != vf->rx_ctx_id) return -EINVAL; return 0; } static int 
qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; err = qlcnic_sriov_validate_del_rx_ctx(vf, cmd); if (err) { cmd->rsp.arg[0] |= (0x6 << 25); return err; } qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_DEL); cmd->req.arg[1] |= vf->vp->handle << 16; err = qlcnic_issue_cmd(adapter, cmd); if (!err) vf->rx_ctx_id = 0; return err; } static int qlcnic_sriov_validate_del_tx_ctx(struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { if ((cmd->req.arg[0] >> 29) != 0x3) return -EINVAL; if ((cmd->req.arg[1] & 0xffff) != vf->tx_ctx_id) return -EINVAL; return 0; } static int qlcnic_sriov_pf_del_tx_ctx_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; err = qlcnic_sriov_validate_del_tx_ctx(vf, cmd); if (err) { cmd->rsp.arg[0] |= (0x6 << 25); return err; } cmd->req.arg[1] |= vf->vp->handle << 16; err = qlcnic_issue_cmd(adapter, cmd); if (!err) vf->tx_ctx_id = 0; return err; } static int qlcnic_sriov_validate_cfg_lro(struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id) return -EINVAL; return 0; } static int qlcnic_sriov_pf_cfg_lro_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; err = qlcnic_sriov_validate_cfg_lro(vf, cmd); if (err) { cmd->rsp.arg[0] |= (0x6 << 25); return err; } err = qlcnic_issue_cmd(adapter, cmd); return err; } static int qlcnic_sriov_pf_cfg_ip_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; cmd->req.arg[1] |= vf->vp->handle << 16; cmd->req.arg[1] |= BIT_31; err = qlcnic_issue_cmd(adapter, cmd); return err; } static int qlcnic_sriov_validate_cfg_intrpt(struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { if (((cmd->req.arg[1] >> 8) & 0xff) != vf->pci_func) return -EINVAL; if (!(cmd->req.arg[1] & BIT_16)) return -EINVAL; if ((cmd->req.arg[1] & 0xff) != 0x1) return -EINVAL; return 0; } static int qlcnic_sriov_pf_cfg_intrpt_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; err = qlcnic_sriov_validate_cfg_intrpt(vf, cmd); if (err) cmd->rsp.arg[0] |= (0x6 << 25); else err = qlcnic_issue_cmd(adapter, cmd); return err; } static int qlcnic_sriov_validate_mtu(struct qlcnic_adapter *adapter, struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { if (cmd->req.arg[1] != vf->rx_ctx_id) return -EINVAL; if (cmd->req.arg[2] > adapter->ahw->max_mtu) return -EINVAL; return 0; } static int qlcnic_sriov_pf_set_mtu_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; err = qlcnic_sriov_validate_mtu(adapter, vf, cmd); if (err) cmd->rsp.arg[0] |= (0x6 << 25); else err = qlcnic_issue_cmd(adapter, cmd); return err; } static int qlcnic_sriov_validate_get_nic_info(struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { if (cmd->req.arg[1] & BIT_31) { if (((cmd->req.arg[1] >> 16) & 0x7fff) != vf->pci_func) return -EINVAL; } else { cmd->req.arg[1] |= vf->vp->handle << 16; } return 0; } static int qlcnic_sriov_pf_get_nic_info_cmd(struct qlcnic_bc_trans *trans, struct 
qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; err = qlcnic_sriov_validate_get_nic_info(vf, cmd); if (err) { cmd->rsp.arg[0] |= (0x6 << 25); return err; } err = qlcnic_issue_cmd(adapter, cmd); return err; } static int qlcnic_sriov_validate_cfg_rss(struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { if (cmd->req.arg[1] != vf->rx_ctx_id) return -EINVAL; return 0; } static int qlcnic_sriov_pf_cfg_rss_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; err = qlcnic_sriov_validate_cfg_rss(vf, cmd); if (err) cmd->rsp.arg[0] |= (0x6 << 25); else err = qlcnic_issue_cmd(adapter, cmd); return err; } static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter, struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal; u16 ctx_id, pkts, time; int err = -EINVAL; u8 type; type = cmd->req.arg[1] & QLC_INTR_COAL_TYPE_MASK; ctx_id = cmd->req.arg[1] >> 16; pkts = cmd->req.arg[2] & 0xffff; time = cmd->req.arg[2] >> 16; switch (type) { case QLCNIC_INTR_COAL_TYPE_RX: if (ctx_id != vf->rx_ctx_id || pkts > coal->rx_packets || time < coal->rx_time_us) goto err_label; break; case QLCNIC_INTR_COAL_TYPE_TX: if (ctx_id != vf->tx_ctx_id || pkts > coal->tx_packets || time < coal->tx_time_us) goto err_label; break; default: netdev_err(adapter->netdev, "Invalid coalescing type 0x%x received\n", type); return err; } return 0; err_label: netdev_err(adapter->netdev, "Expected: rx_ctx_id 0x%x rx_packets 0x%x rx_time_us 0x%x tx_ctx_id 0x%x tx_packets 0x%x tx_time_us 0x%x\n", vf->rx_ctx_id, coal->rx_packets, coal->rx_time_us, vf->tx_ctx_id, coal->tx_packets, coal->tx_time_us); netdev_err(adapter->netdev, "Received: ctx_id 0x%x packets 0x%x time_us 0x%x type 0x%x\n", ctx_id, pkts, time, type); return err; } static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = tran->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; err = qlcnic_sriov_validate_cfg_intrcoal(adapter, vf, cmd); if (err) { cmd->rsp.arg[0] |= (0x6 << 25); return err; } err = qlcnic_issue_cmd(adapter, cmd); return err; } static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter, struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { struct qlcnic_vport *vp = vf->vp; u8 op, new_op; if (!(cmd->req.arg[1] & BIT_8)) return -EINVAL; cmd->req.arg[1] |= (vf->vp->handle << 16); cmd->req.arg[1] |= BIT_31; if (vp->vlan_mode == QLC_PVID_MODE) { op = cmd->req.arg[1] & 0x7; cmd->req.arg[1] &= ~0x7; new_op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ? 
QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL; cmd->req.arg[3] |= vp->pvid << 16; cmd->req.arg[1] |= new_op; } return 0; } static int qlcnic_sriov_pf_cfg_macvlan_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; err = qlcnic_sriov_validate_cfg_macvlan(adapter, vf, cmd); if (err) { cmd->rsp.arg[0] |= (0x6 << 25); return err; } err = qlcnic_issue_cmd(adapter, cmd); return err; } static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id) return -EINVAL; return 0; } static int qlcnic_sriov_pf_linkevent_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; err = qlcnic_sriov_validate_linkevent(vf, cmd); if (err) { cmd->rsp.arg[0] |= (0x6 << 25); return err; } err = qlcnic_issue_cmd(adapter, cmd); return err; } static int qlcnic_sriov_pf_cfg_promisc_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err; cmd->req.arg[1] |= vf->vp->handle << 16; cmd->req.arg[1] |= BIT_31; err = qlcnic_issue_cmd(adapter, cmd); return err; } static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_vport *vp = vf->vp; u8 mode = vp->vlan_mode; struct qlcnic_adapter *adapter; struct qlcnic_sriov *sriov; adapter = vf->adapter; sriov = adapter->ahw->sriov; cmd->rsp.arg[0] |= 1 << 25; /* For 84xx adapter in case of PVID , PFD should send vlan mode as * QLC_NO_VLAN_MODE to VFD which is zero in mailbox response */ if (qlcnic_84xx_check(adapter) && mode == QLC_PVID_MODE) return 0; switch (mode) { case QLC_GUEST_VLAN_MODE: cmd->rsp.arg[1] = mode | 1 << 8; cmd->rsp.arg[2] = sriov->num_allowed_vlans << 16; break; case QLC_PVID_MODE: cmd->rsp.arg[1] = mode | 1 << 8 | vp->pvid << 16; break; } return 0; } static int qlcnic_sriov_pf_del_guest_vlan(struct qlcnic_adapter *adapter, struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; u16 vlan; if (!qlcnic_sriov_check_any_vlan(vf)) return -EINVAL; vlan = cmd->req.arg[1] >> 16; if (!vf->rx_ctx_id) { qlcnic_sriov_del_vlan_id(sriov, vf, vlan); return 0; } qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_DEL); qlcnic_sriov_del_vlan_id(sriov, vf, vlan); if (qlcnic_83xx_pf_check(adapter)) qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0, QLCNIC_MAC_ADD); return 0; } static int qlcnic_sriov_pf_add_guest_vlan(struct qlcnic_adapter *adapter, struct qlcnic_vf_info *vf, struct qlcnic_cmd_args *cmd) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; int err = -EIO; u16 vlan; if (qlcnic_83xx_pf_check(adapter) && qlcnic_sriov_check_any_vlan(vf)) return err; vlan = cmd->req.arg[1] >> 16; if (!vf->rx_ctx_id) { qlcnic_sriov_add_vlan_id(sriov, vf, vlan); return 0; } if (qlcnic_83xx_pf_check(adapter)) { err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0, QLCNIC_MAC_DEL); if (err) return err; } err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_ADD); if (err) { if (qlcnic_83xx_pf_check(adapter)) qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0, QLCNIC_MAC_ADD); return err; } qlcnic_sriov_add_vlan_id(sriov, vf, vlan); return err; } static int qlcnic_sriov_pf_cfg_guest_vlan_cmd(struct qlcnic_bc_trans *tran, struct qlcnic_cmd_args *cmd) { struct 
qlcnic_vf_info *vf = tran->vf; struct qlcnic_adapter *adapter = vf->adapter; struct qlcnic_vport *vp = vf->vp; int err = -EIO; u8 op; if (vp->vlan_mode != QLC_GUEST_VLAN_MODE) { cmd->rsp.arg[0] |= 2 << 25; return err; } op = cmd->req.arg[1] & 0xf; if (op) err = qlcnic_sriov_pf_add_guest_vlan(adapter, vf, cmd); else err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf, cmd); cmd->rsp.arg[0] |= err ? 2 << 25 : 1 << 25; return err; } static const int qlcnic_pf_passthru_supp_cmds[] = { QLCNIC_CMD_GET_STATISTICS, QLCNIC_CMD_GET_PORT_CONFIG, QLCNIC_CMD_GET_LINK_STATUS, QLCNIC_CMD_INIT_NIC_FUNC, QLCNIC_CMD_STOP_NIC_FUNC, }; static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = { [QLCNIC_BC_CMD_CHANNEL_INIT] = {&qlcnic_sriov_pf_channel_cfg_cmd}, [QLCNIC_BC_CMD_CHANNEL_TERM] = {&qlcnic_sriov_pf_channel_cfg_cmd}, [QLCNIC_BC_CMD_GET_ACL] = {&qlcnic_sriov_pf_get_acl_cmd}, [QLCNIC_BC_CMD_CFG_GUEST_VLAN] = {&qlcnic_sriov_pf_cfg_guest_vlan_cmd}, }; static const struct qlcnic_sriov_fw_cmd_handler qlcnic_pf_fw_cmd_hdlr[] = { {QLCNIC_CMD_CREATE_RX_CTX, qlcnic_sriov_pf_create_rx_ctx_cmd}, {QLCNIC_CMD_CREATE_TX_CTX, qlcnic_sriov_pf_create_tx_ctx_cmd}, {QLCNIC_CMD_MAC_ADDRESS, qlcnic_sriov_pf_mac_address_cmd}, {QLCNIC_CMD_DESTROY_RX_CTX, qlcnic_sriov_pf_del_rx_ctx_cmd}, {QLCNIC_CMD_DESTROY_TX_CTX, qlcnic_sriov_pf_del_tx_ctx_cmd}, {QLCNIC_CMD_CONFIGURE_HW_LRO, qlcnic_sriov_pf_cfg_lro_cmd}, {QLCNIC_CMD_CONFIGURE_IP_ADDR, qlcnic_sriov_pf_cfg_ip_cmd}, {QLCNIC_CMD_CONFIG_INTRPT, qlcnic_sriov_pf_cfg_intrpt_cmd}, {QLCNIC_CMD_SET_MTU, qlcnic_sriov_pf_set_mtu_cmd}, {QLCNIC_CMD_GET_NIC_INFO, qlcnic_sriov_pf_get_nic_info_cmd}, {QLCNIC_CMD_CONFIGURE_RSS, qlcnic_sriov_pf_cfg_rss_cmd}, {QLCNIC_CMD_CONFIG_INTR_COAL, qlcnic_sriov_pf_cfg_intrcoal_cmd}, {QLCNIC_CMD_CONFIG_MAC_VLAN, qlcnic_sriov_pf_cfg_macvlan_cmd}, {QLCNIC_CMD_GET_LINK_EVENT, qlcnic_sriov_pf_linkevent_cmd}, {QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, qlcnic_sriov_pf_cfg_promisc_cmd}, }; void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *adapter, struct qlcnic_bc_trans *trans, struct qlcnic_cmd_args *cmd) { u8 size, cmd_op; cmd_op = trans->req_hdr->cmd_op; if (trans->req_hdr->op_type == QLC_BC_CMD) { size = ARRAY_SIZE(qlcnic_pf_bc_cmd_hdlr); if (cmd_op < size) { qlcnic_pf_bc_cmd_hdlr[cmd_op].fn(trans, cmd); return; } } else { int i; size = ARRAY_SIZE(qlcnic_pf_fw_cmd_hdlr); for (i = 0; i < size; i++) { if (cmd_op == qlcnic_pf_fw_cmd_hdlr[i].cmd) { qlcnic_pf_fw_cmd_hdlr[i].fn(trans, cmd); return; } } size = ARRAY_SIZE(qlcnic_pf_passthru_supp_cmds); for (i = 0; i < size; i++) { if (cmd_op == qlcnic_pf_passthru_supp_cmds[i]) { qlcnic_issue_cmd(adapter, cmd); return; } } } cmd->rsp.arg[0] |= (0x9 << 25); } void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter, u32 *int_id) { u16 vpid; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, adapter->ahw->pci_func); *int_id |= vpid; } void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter, u32 *int_id) { u16 vpid; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, adapter->ahw->pci_func); *int_id |= vpid << 16; } void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter, u32 *int_id) { int vpid; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, adapter->ahw->pci_func); *int_id |= vpid << 16; } void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter, u32 *int_id) { u16 vpid; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, adapter->ahw->pci_func); *int_id |= vpid << 16; } void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter 
*adapter, u32 *int_id) { u16 vpid; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, adapter->ahw->pci_func); *int_id |= (vpid << 16) | BIT_31; } void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter, u32 *int_id) { u16 vpid; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, adapter->ahw->pci_func); *int_id |= (vpid << 16) | BIT_31; } void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter, u32 *int_id) { u16 vpid; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, adapter->ahw->pci_func); *int_id |= (vpid << 16) | BIT_31; } static void qlcnic_sriov_del_rx_ctx(struct qlcnic_adapter *adapter, struct qlcnic_vf_info *vf) { struct qlcnic_cmd_args cmd; int vpid; if (!vf->rx_ctx_id) return; if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX)) return; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func); if (vpid >= 0) { cmd.req.arg[1] = vf->rx_ctx_id | (vpid & 0xffff) << 16; if (qlcnic_issue_cmd(adapter, &cmd)) dev_err(&adapter->pdev->dev, "Failed to delete Tx ctx in firmware for func 0x%x\n", vf->pci_func); else vf->rx_ctx_id = 0; } qlcnic_free_mbx_args(&cmd); } static void qlcnic_sriov_del_tx_ctx(struct qlcnic_adapter *adapter, struct qlcnic_vf_info *vf) { struct qlcnic_cmd_args cmd; int vpid; if (!vf->tx_ctx_id) return; if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX)) return; vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func); if (vpid >= 0) { cmd.req.arg[1] |= vf->tx_ctx_id | (vpid & 0xffff) << 16; if (qlcnic_issue_cmd(adapter, &cmd)) dev_err(&adapter->pdev->dev, "Failed to delete Tx ctx in firmware for func 0x%x\n", vf->pci_func); else vf->tx_ctx_id = 0; } qlcnic_free_mbx_args(&cmd); } static int qlcnic_sriov_add_act_list_irqsave(struct qlcnic_sriov *sriov, struct qlcnic_vf_info *vf, struct qlcnic_bc_trans *trans) { struct qlcnic_trans_list *t_list = &vf->rcv_act; unsigned long flag; spin_lock_irqsave(&t_list->lock, flag); __qlcnic_sriov_add_act_list(sriov, vf, trans); spin_unlock_irqrestore(&t_list->lock, flag); return 0; } static void __qlcnic_sriov_process_flr(struct qlcnic_vf_info *vf) { struct qlcnic_adapter *adapter = vf->adapter; qlcnic_sriov_cleanup_list(&vf->rcv_pend); cancel_work_sync(&vf->trans_work); qlcnic_sriov_cleanup_list(&vf->rcv_act); if (test_bit(QLC_BC_VF_SOFT_FLR, &vf->state)) { qlcnic_sriov_del_tx_ctx(adapter, vf); qlcnic_sriov_del_rx_ctx(adapter, vf); } qlcnic_sriov_pf_config_vport(adapter, 0, vf->pci_func); clear_bit(QLC_BC_VF_FLR, &vf->state); if (test_bit(QLC_BC_VF_SOFT_FLR, &vf->state)) { qlcnic_sriov_add_act_list_irqsave(adapter->ahw->sriov, vf, vf->flr_trans); clear_bit(QLC_BC_VF_SOFT_FLR, &vf->state); vf->flr_trans = NULL; } } static void qlcnic_sriov_pf_process_flr(struct work_struct *work) { struct qlcnic_vf_info *vf; vf = container_of(work, struct qlcnic_vf_info, flr_work); __qlcnic_sriov_process_flr(vf); return; } static void qlcnic_sriov_schedule_flr(struct qlcnic_sriov *sriov, struct qlcnic_vf_info *vf, work_func_t func) { if (test_bit(__QLCNIC_RESETTING, &vf->adapter->state)) return; INIT_WORK(&vf->flr_work, func); queue_work(sriov->bc.bc_flr_wq, &vf->flr_work); } static void qlcnic_sriov_handle_soft_flr(struct qlcnic_adapter *adapter, struct qlcnic_bc_trans *trans, struct qlcnic_vf_info *vf) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; set_bit(QLC_BC_VF_FLR, &vf->state); clear_bit(QLC_BC_VF_STATE, &vf->state); set_bit(QLC_BC_VF_SOFT_FLR, &vf->state); vf->flr_trans = trans; qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr); 
netdev_info(adapter->netdev, "Software FLR for PCI func %d\n", vf->pci_func); } bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *adapter, struct qlcnic_bc_trans *trans, struct qlcnic_vf_info *vf) { struct qlcnic_bc_hdr *hdr = trans->req_hdr; if ((hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) && (hdr->op_type == QLC_BC_CMD) && test_bit(QLC_BC_VF_STATE, &vf->state)) { qlcnic_sriov_handle_soft_flr(adapter, trans, vf); return true; } return false; } void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov, struct qlcnic_vf_info *vf) { struct net_device *dev = vf->adapter->netdev; struct qlcnic_vport *vp = vf->vp; if (!test_and_clear_bit(QLC_BC_VF_STATE, &vf->state)) { clear_bit(QLC_BC_VF_FLR, &vf->state); return; } if (test_and_set_bit(QLC_BC_VF_FLR, &vf->state)) { netdev_info(dev, "FLR for PCI func %d in progress\n", vf->pci_func); return; } if (vp->vlan_mode == QLC_GUEST_VLAN_MODE) memset(vf->sriov_vlans, 0, sizeof(*vf->sriov_vlans) * sriov->num_allowed_vlans); qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr); netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func); } void qlcnic_sriov_pf_reset(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_sriov *sriov = ahw->sriov; struct qlcnic_vf_info *vf; u16 num_vfs = sriov->num_vfs; int i; for (i = 0; i < num_vfs; i++) { vf = &sriov->vf_info[i]; vf->rx_ctx_id = 0; vf->tx_ctx_id = 0; cancel_work_sync(&vf->flr_work); __qlcnic_sriov_process_flr(vf); clear_bit(QLC_BC_VF_STATE, &vf->state); } qlcnic_sriov_pf_reset_vport_handle(adapter, ahw->pci_func); QLCWRX(ahw, QLCNIC_MBX_INTR_ENBL, (ahw->num_msix - 1) << 8); } int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; int err; if (!qlcnic_sriov_enable_check(adapter)) return 0; ahw->op_mode = QLCNIC_SRIOV_PF_FUNC; err = qlcnic_sriov_pf_init(adapter); if (err) return err; dev_info(&adapter->pdev->dev, "%s: op_mode %d\n", __func__, ahw->op_mode); return err; } int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_sriov *sriov = adapter->ahw->sriov; int i, num_vfs; struct qlcnic_vf_info *vf_info; u8 *curr_mac; if (!qlcnic_sriov_pf_check(adapter)) return -EOPNOTSUPP; num_vfs = sriov->num_vfs; if (!is_valid_ether_addr(mac) || vf >= num_vfs) return -EINVAL; if (ether_addr_equal(adapter->mac_addr, mac)) { netdev_err(netdev, "MAC address is already in use by the PF\n"); return -EINVAL; } for (i = 0; i < num_vfs; i++) { vf_info = &sriov->vf_info[i]; if (ether_addr_equal(vf_info->vp->mac, mac)) { netdev_err(netdev, "MAC address is already in use by VF %d\n", i); return -EINVAL; } } vf_info = &sriov->vf_info[vf]; curr_mac = vf_info->vp->mac; if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) { netdev_err(netdev, "MAC address change failed for VF %d, as VF driver is loaded. 
Please unload VF driver and retry the operation\n", vf); return -EOPNOTSUPP; } memcpy(curr_mac, mac, netdev->addr_len); netdev_info(netdev, "MAC Address %pM is configured for VF %d\n", mac, vf); return 0; } int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, int min_tx_rate, int max_tx_rate) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_vf_info *vf_info; struct qlcnic_info nic_info; struct qlcnic_vport *vp; u16 vpid; if (!qlcnic_sriov_pf_check(adapter)) return -EOPNOTSUPP; if (vf >= sriov->num_vfs) return -EINVAL; vf_info = &sriov->vf_info[vf]; vp = vf_info->vp; vpid = vp->handle; if (!min_tx_rate) min_tx_rate = QLC_VF_MIN_TX_RATE; if (max_tx_rate && max_tx_rate >= 10000) { netdev_err(netdev, "Invalid max Tx rate, allowed range is [%d - %d]", min_tx_rate, QLC_VF_MAX_TX_RATE); return -EINVAL; } if (!max_tx_rate) max_tx_rate = 10000; if (min_tx_rate && min_tx_rate < QLC_VF_MIN_TX_RATE) { netdev_err(netdev, "Invalid min Tx rate, allowed range is [%d - %d]", QLC_VF_MIN_TX_RATE, max_tx_rate); return -EINVAL; } if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) { if (qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, vpid)) return -EIO; nic_info.max_tx_bw = max_tx_rate / 100; nic_info.min_tx_bw = min_tx_rate / 100; nic_info.bit_offsets = BIT_0; if (qlcnic_sriov_pf_set_vport_info(adapter, &nic_info, vpid)) return -EIO; } vp->max_tx_bw = max_tx_rate / 100; netdev_info(netdev, "Setting Max Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n", max_tx_rate, vp->max_tx_bw, vf); vp->min_tx_bw = min_tx_rate / 100; netdev_info(netdev, "Setting Min Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n", min_tx_rate, vp->min_tx_bw, vf); return 0; } int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_vf_info *vf_info; struct qlcnic_vport *vp; if (!qlcnic_sriov_pf_check(adapter)) return -EOPNOTSUPP; if (vf >= sriov->num_vfs || qos > 7) return -EINVAL; if (vlan_proto != htons(ETH_P_8021Q)) return -EPROTONOSUPPORT; if (vlan > MAX_VLAN_ID) { netdev_err(netdev, "Invalid VLAN ID, allowed range is [0 - %d]\n", MAX_VLAN_ID); return -EINVAL; } vf_info = &sriov->vf_info[vf]; vp = vf_info->vp; if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) { netdev_err(netdev, "VLAN change failed for VF %d, as VF driver is loaded. 
Please unload VF driver and retry the operation\n", vf); return -EOPNOTSUPP; } memset(vf_info->sriov_vlans, 0, sizeof(*vf_info->sriov_vlans) * sriov->num_allowed_vlans); switch (vlan) { case 4095: vp->vlan_mode = QLC_GUEST_VLAN_MODE; break; case 0: vp->vlan_mode = QLC_NO_VLAN_MODE; vp->qos = 0; break; default: vp->vlan_mode = QLC_PVID_MODE; qlcnic_sriov_add_vlan_id(sriov, vf_info, vlan); vp->qos = qos; vp->pvid = vlan; } netdev_info(netdev, "Setting VLAN %d, QoS %d, for VF %d\n", vlan, qos, vf); return 0; } static __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter, struct qlcnic_vport *vp, int vf) { __u32 vlan = 0; switch (vp->vlan_mode) { case QLC_PVID_MODE: vlan = vp->pvid; break; case QLC_GUEST_VLAN_MODE: vlan = MAX_VLAN_ID; break; case QLC_NO_VLAN_MODE: vlan = 0; break; default: netdev_info(adapter->netdev, "Invalid VLAN mode = %d for VF %d\n", vp->vlan_mode, vf); } return vlan; } int qlcnic_sriov_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_vport *vp; if (!qlcnic_sriov_pf_check(adapter)) return -EOPNOTSUPP; if (vf >= sriov->num_vfs) return -EINVAL; vp = sriov->vf_info[vf].vp; memcpy(&ivi->mac, vp->mac, ETH_ALEN); ivi->vlan = qlcnic_sriov_get_vf_vlan(adapter, vp, vf); ivi->qos = vp->qos; ivi->spoofchk = vp->spoofchk; if (vp->max_tx_bw == MAX_BW) ivi->max_tx_rate = 0; else ivi->max_tx_rate = vp->max_tx_bw * 100; if (vp->min_tx_bw == MIN_BW) ivi->min_tx_rate = 0; else ivi->min_tx_rate = vp->min_tx_bw * 100; ivi->vf = vf; return 0; } int qlcnic_sriov_set_vf_spoofchk(struct net_device *netdev, int vf, bool chk) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_vf_info *vf_info; struct qlcnic_vport *vp; if (!qlcnic_sriov_pf_check(adapter)) return -EOPNOTSUPP; if (vf >= sriov->num_vfs) return -EINVAL; vf_info = &sriov->vf_info[vf]; vp = vf_info->vp; if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) { netdev_err(netdev, "Spoof check change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n", vf); return -EOPNOTSUPP; } vp->spoofchk = chk; return 0; }
linux-master
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
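Editor's aside after the qlcnic_sriov_pf.c listing: qlcnic_pci_sriov_configure() is the driver's sriov_configure hook, which the PCI core exposes to userspace as the sriov_numvfs sysfs attribute (writing 0 disables SR-IOV, a positive count enables that many VFs). The sketch below is a hypothetical admin-side helper that drives that attribute; the PCI address it uses is a placeholder, not a value taken from this driver.

/*
 * Editor's sketch -- not part of qlcnic_sriov_pf.c. Userspace C that writes
 * the VF count to the standard sriov_numvfs attribute, the path that ends
 * up in the driver's qlcnic_pci_sriov_configure() above. The PCI address
 * below is an example only; adjust it for the adapter in question.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>

static int set_numvfs(const char *bdf, int num_vfs)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/pci/devices/%s/sriov_numvfs", bdf);
	f = fopen(path, "w");
	if (!f) {
		fprintf(stderr, "open %s: %s\n", path, strerror(errno));
		return -1;
	}

	/* 0 disables SR-IOV; a positive count enables that many VFs. */
	if (fprintf(f, "%d\n", num_vfs) < 0) {
		fprintf(stderr, "write %s: %s\n", path, strerror(errno));
		fclose(f);
		return -1;
	}
	if (fclose(f) != 0) {
		fprintf(stderr, "close %s: %s\n", path, strerror(errno));
		return -1;
	}
	return 0;
}

int main(void)
{
	/* "0000:03:00.0" is a placeholder address, not tied to any real setup. */
	return set_numvfs("0000:03:00.0", 4) ? 1 : 0;
}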
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic qlcnic NIC Driver * Copyright (c) 2009-2013 QLogic Corporation */ #include "qlcnic.h" #include "qlcnic_hw.h" struct crb_addr_pair { u32 addr; u32 data; }; #define QLCNIC_MAX_CRB_XFORM 60 static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM]; #define crb_addr_transform(name) \ (crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \ QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20) #define QLCNIC_ADDR_ERROR (0xffffffff) static int qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter); static void crb_addr_transform_setup(void) { crb_addr_transform(XDMA); crb_addr_transform(TIMR); crb_addr_transform(SRE); crb_addr_transform(SQN3); crb_addr_transform(SQN2); crb_addr_transform(SQN1); crb_addr_transform(SQN0); crb_addr_transform(SQS3); crb_addr_transform(SQS2); crb_addr_transform(SQS1); crb_addr_transform(SQS0); crb_addr_transform(RPMX7); crb_addr_transform(RPMX6); crb_addr_transform(RPMX5); crb_addr_transform(RPMX4); crb_addr_transform(RPMX3); crb_addr_transform(RPMX2); crb_addr_transform(RPMX1); crb_addr_transform(RPMX0); crb_addr_transform(ROMUSB); crb_addr_transform(SN); crb_addr_transform(QMN); crb_addr_transform(QMS); crb_addr_transform(PGNI); crb_addr_transform(PGND); crb_addr_transform(PGN3); crb_addr_transform(PGN2); crb_addr_transform(PGN1); crb_addr_transform(PGN0); crb_addr_transform(PGSI); crb_addr_transform(PGSD); crb_addr_transform(PGS3); crb_addr_transform(PGS2); crb_addr_transform(PGS1); crb_addr_transform(PGS0); crb_addr_transform(PS); crb_addr_transform(PH); crb_addr_transform(NIU); crb_addr_transform(I2Q); crb_addr_transform(EG); crb_addr_transform(MN); crb_addr_transform(MS); crb_addr_transform(CAS2); crb_addr_transform(CAS1); crb_addr_transform(CAS0); crb_addr_transform(CAM); crb_addr_transform(C2C1); crb_addr_transform(C2C0); crb_addr_transform(SMB); crb_addr_transform(OCM0); crb_addr_transform(I2C0); } void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter) { struct qlcnic_recv_context *recv_ctx; struct qlcnic_host_rds_ring *rds_ring; struct qlcnic_rx_buffer *rx_buf; int i, ring; recv_ctx = adapter->recv_ctx; for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; for (i = 0; i < rds_ring->num_desc; ++i) { rx_buf = &(rds_ring->rx_buf_arr[i]); if (rx_buf->skb == NULL) continue; dma_unmap_single(&adapter->pdev->dev, rx_buf->dma, rds_ring->dma_size, DMA_FROM_DEVICE); dev_kfree_skb_any(rx_buf->skb); } } } void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter) { struct qlcnic_recv_context *recv_ctx; struct qlcnic_host_rds_ring *rds_ring; struct qlcnic_rx_buffer *rx_buf; int i, ring; recv_ctx = adapter->recv_ctx; for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; INIT_LIST_HEAD(&rds_ring->free_list); rx_buf = rds_ring->rx_buf_arr; for (i = 0; i < rds_ring->num_desc; i++) { list_add_tail(&rx_buf->list, &rds_ring->free_list); rx_buf++; } } } void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter, struct qlcnic_host_tx_ring *tx_ring) { struct qlcnic_cmd_buffer *cmd_buf; struct qlcnic_skb_frag *buffrag; int i, j; spin_lock(&tx_ring->tx_clean_lock); cmd_buf = tx_ring->cmd_buf_arr; for (i = 0; i < tx_ring->num_desc; i++) { buffrag = cmd_buf->frag_array; if (buffrag->dma) { dma_unmap_single(&adapter->pdev->dev, buffrag->dma, buffrag->length, DMA_TO_DEVICE); buffrag->dma = 0ULL; } for (j = 1; j < cmd_buf->frag_count; j++) { buffrag++; if (buffrag->dma) { dma_unmap_page(&adapter->pdev->dev, buffrag->dma, buffrag->length, DMA_TO_DEVICE); 
buffrag->dma = 0ULL; } } if (cmd_buf->skb) { dev_kfree_skb_any(cmd_buf->skb); cmd_buf->skb = NULL; } cmd_buf++; } spin_unlock(&tx_ring->tx_clean_lock); } void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) { struct qlcnic_recv_context *recv_ctx; struct qlcnic_host_rds_ring *rds_ring; int ring; recv_ctx = adapter->recv_ctx; if (recv_ctx->rds_rings == NULL) return; for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; vfree(rds_ring->rx_buf_arr); rds_ring->rx_buf_arr = NULL; } kfree(recv_ctx->rds_rings); } int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter) { struct qlcnic_recv_context *recv_ctx; struct qlcnic_host_rds_ring *rds_ring; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_rx_buffer *rx_buf; int ring, i; recv_ctx = adapter->recv_ctx; rds_ring = kcalloc(adapter->max_rds_rings, sizeof(struct qlcnic_host_rds_ring), GFP_KERNEL); if (rds_ring == NULL) goto err_out; recv_ctx->rds_rings = rds_ring; for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; switch (ring) { case RCV_RING_NORMAL: rds_ring->num_desc = adapter->num_rxd; rds_ring->dma_size = QLCNIC_P3P_RX_BUF_MAX_LEN; rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN; break; case RCV_RING_JUMBO: rds_ring->num_desc = adapter->num_jumbo_rxd; rds_ring->dma_size = QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN; if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA; rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN; break; } rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring)); if (rds_ring->rx_buf_arr == NULL) goto err_out; INIT_LIST_HEAD(&rds_ring->free_list); /* * Now go through all of them, set reference handles * and put them in the queues. */ rx_buf = rds_ring->rx_buf_arr; for (i = 0; i < rds_ring->num_desc; i++) { list_add_tail(&rx_buf->list, &rds_ring->free_list); rx_buf->ref_handle = i; rx_buf++; } spin_lock_init(&rds_ring->lock); } for (ring = 0; ring < adapter->drv_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; sds_ring->irq = adapter->msix_entries[ring].vector; sds_ring->adapter = adapter; sds_ring->num_desc = adapter->num_rxd; if (qlcnic_82xx_check(adapter)) { if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) sds_ring->tx_ring = &adapter->tx_ring[ring]; else sds_ring->tx_ring = &adapter->tx_ring[0]; } for (i = 0; i < NUM_RCV_DESC_RINGS; i++) INIT_LIST_HEAD(&sds_ring->free_list[i]); } return 0; err_out: qlcnic_free_sw_resources(adapter); return -ENOMEM; } /* * Utility to translate from internal Phantom CRB address * to external PCI CRB address. 
*/ static u32 qlcnic_decode_crb_addr(u32 addr) { int i; u32 base_addr, offset, pci_base; crb_addr_transform_setup(); pci_base = QLCNIC_ADDR_ERROR; base_addr = addr & 0xfff00000; offset = addr & 0x000fffff; for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) { if (crb_addr_xform[i] == base_addr) { pci_base = i << 20; break; } } if (pci_base == QLCNIC_ADDR_ERROR) return pci_base; else return pci_base + offset; } #define QLCNIC_MAX_ROM_WAIT_USEC 100 static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter) { long timeout = 0; long done = 0; int err = 0; cond_resched(); while (done == 0) { done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS, &err); done &= 2; if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) { dev_err(&adapter->pdev->dev, "Timeout reached waiting for rom done"); return -EIO; } udelay(1); } return 0; } static int do_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp) { int err = 0; QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr); QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3); QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb); if (qlcnic_wait_rom_done(adapter)) { dev_err(&adapter->pdev->dev, "Error waiting for rom done\n"); return -EIO; } /* reset abyte_cnt and dummy_byte_cnt */ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0); udelay(10); QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA, &err); if (err == -EIO) return err; return 0; } static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, u8 *bytes, size_t size) { int addridx; int ret = 0; for (addridx = addr; addridx < (addr + size); addridx += 4) { int v; ret = do_rom_fast_read(adapter, addridx, &v); if (ret != 0) break; *(__le32 *)bytes = cpu_to_le32(v); bytes += 4; } return ret; } int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, u8 *bytes, size_t size) { int ret; ret = qlcnic_rom_lock(adapter); if (ret < 0) return ret; ret = do_rom_fast_read_words(adapter, addr, bytes, size); qlcnic_rom_unlock(adapter); return ret; } int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp) { int ret; if (qlcnic_rom_lock(adapter) != 0) return -EIO; ret = do_rom_fast_read(adapter, addr, valp); qlcnic_rom_unlock(adapter); return ret; } int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter) { int addr, err = 0; int i, n, init_delay; struct crb_addr_pair *buf; unsigned offset; u32 off, val; struct pci_dev *pdev = adapter->pdev; QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, 0); QLC_SHARED_REG_WR32(adapter, QLCNIC_RCVPEG_STATE, 0); /* Halt all the indiviual PEGs and other blocks */ /* disable all I2Q */ QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x10, 0x0); QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x14, 0x0); QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x18, 0x0); QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x1c, 0x0); QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x20, 0x0); QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x24, 0x0); /* disable all niu interrupts */ QLCWR32(adapter, QLCNIC_CRB_NIU + 0x40, 0xff); /* disable xge rx/tx */ QLCWR32(adapter, QLCNIC_CRB_NIU + 0x70000, 0x00); /* disable xg1 rx/tx */ QLCWR32(adapter, QLCNIC_CRB_NIU + 0x80000, 0x00); /* disable sideband mac */ QLCWR32(adapter, QLCNIC_CRB_NIU + 0x90000, 0x00); /* disable ap0 mac */ QLCWR32(adapter, QLCNIC_CRB_NIU + 0xa0000, 0x00); /* disable ap1 mac */ QLCWR32(adapter, QLCNIC_CRB_NIU + 0xb0000, 0x00); /* halt sre */ val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000, &err); if (err == -EIO) return err; QLCWR32(adapter, QLCNIC_CRB_SRE + 0x1000, val 
& (~(0x1))); /* halt epg */ QLCWR32(adapter, QLCNIC_CRB_EPG + 0x1300, 0x1); /* halt timers */ QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x0, 0x0); QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x8, 0x0); QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x10, 0x0); QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x18, 0x0); QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x100, 0x0); QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x200, 0x0); /* halt pegs */ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c, 1); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c, 1); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c, 1); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c, 1); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, 1); msleep(20); /* big hammer don't reset CAM block on reset */ QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff); /* Init HW CRB block */ if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) || qlcnic_rom_fast_read(adapter, 4, &n) != 0) { dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n); return -EIO; } offset = n & 0xffffU; n = (n >> 16) & 0xffffU; if (n >= 1024) { dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n"); return -EIO; } buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL); if (buf == NULL) return -ENOMEM; for (i = 0; i < n; i++) { if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) { kfree(buf); return -EIO; } buf[i].addr = addr; buf[i].data = val; } for (i = 0; i < n; i++) { off = qlcnic_decode_crb_addr(buf[i].addr); if (off == QLCNIC_ADDR_ERROR) { dev_err(&pdev->dev, "CRB init value out of range %x\n", buf[i].addr); continue; } off += QLCNIC_PCI_CRBSPACE; if (off & 1) continue; /* skipping cold reboot MAGIC */ if (off == QLCNIC_CAM_RAM(0x1fc)) continue; if (off == (QLCNIC_CRB_I2C0 + 0x1c)) continue; if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */ continue; if (off == (ROMUSB_GLB + 0xa8)) continue; if (off == (ROMUSB_GLB + 0xc8)) /* core clock */ continue; if (off == (ROMUSB_GLB + 0x24)) /* MN clock */ continue; if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */ continue; if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET) continue; /* skip the function enable register */ if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION)) continue; if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2)) continue; if ((off & 0x0ff00000) == QLCNIC_CRB_SMB) continue; init_delay = 1; /* After writing this register, HW needs time for CRB */ /* to quiet down (else crb_window returns 0xffffffff) */ if (off == QLCNIC_ROMUSB_GLB_SW_RESET) init_delay = 1000; QLCWR32(adapter, off, buf[i].data); msleep(init_delay); } kfree(buf); /* Initialize protocol process engine */ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0); usleep_range(1000, 1500); QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0); QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0); return 0; } static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter) { u32 val; int retries = 
QLCNIC_CMDPEG_CHECK_RETRY_COUNT; do { val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CMDPEG_STATE); switch (val) { case PHAN_INITIALIZE_COMPLETE: case PHAN_INITIALIZE_ACK: return 0; case PHAN_INITIALIZE_FAILED: goto out_err; default: break; } msleep(QLCNIC_CMDPEG_CHECK_DELAY); } while (--retries); QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); out_err: dev_err(&adapter->pdev->dev, "Command Peg initialization not " "complete, state: 0x%x.\n", val); return -EIO; } static int qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter) { u32 val; int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT; do { val = QLC_SHARED_REG_RD32(adapter, QLCNIC_RCVPEG_STATE); if (val == PHAN_PEG_RCV_INITIALIZED) return 0; msleep(QLCNIC_RCVPEG_CHECK_DELAY); } while (--retries); dev_err(&adapter->pdev->dev, "Receive Peg initialization not complete, state: 0x%x.\n", val); return -EIO; } int qlcnic_check_fw_status(struct qlcnic_adapter *adapter) { int err; err = qlcnic_cmd_peg_ready(adapter); if (err) return err; err = qlcnic_receive_peg_ready(adapter); if (err) return err; QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, PHAN_INITIALIZE_ACK); return err; } int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) { int timeo; u32 val; val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO); val = QLC_DEV_GET_DRV(val, adapter->portnum); if ((val & 0x3) != QLCNIC_TYPE_NIC) { dev_err(&adapter->pdev->dev, "Not an Ethernet NIC func=%u\n", val); return -EIO; } adapter->ahw->physical_port = (val >> 2); if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo)) timeo = QLCNIC_INIT_TIMEOUT_SECS; adapter->dev_init_timeo = timeo; if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo)) timeo = QLCNIC_RESET_TIMEOUT_SECS; adapter->reset_ack_timeo = timeo; return 0; } static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region, struct qlcnic_flt_entry *region_entry) { struct qlcnic_flt_header flt_hdr; struct qlcnic_flt_entry *flt_entry; int i = 0, ret; u32 entry_size; memset(region_entry, 0, sizeof(struct qlcnic_flt_entry)); ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION, (u8 *)&flt_hdr, sizeof(struct qlcnic_flt_header)); if (ret) { dev_warn(&adapter->pdev->dev, "error reading flash layout header\n"); return -EIO; } entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header); flt_entry = vzalloc(entry_size); if (flt_entry == NULL) return -EIO; ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION + sizeof(struct qlcnic_flt_header), (u8 *)flt_entry, entry_size); if (ret) { dev_warn(&adapter->pdev->dev, "error reading flash layout entries\n"); goto err_out; } while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) { if (flt_entry[i].region == region) break; i++; } if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) { dev_warn(&adapter->pdev->dev, "region=%x not found in %d regions\n", region, i); ret = -EIO; goto err_out; } memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry)); err_out: vfree(flt_entry); return ret; } int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter) { struct qlcnic_flt_entry fw_entry; u32 ver = -1, min_ver; int ret; if (adapter->ahw->revision_id == QLCNIC_P3P_C0) ret = qlcnic_get_flt_entry(adapter, QLCNIC_C0_FW_IMAGE_REGION, &fw_entry); else ret = qlcnic_get_flt_entry(adapter, QLCNIC_B0_FW_IMAGE_REGION, &fw_entry); if (!ret) /* 0-4:-signature, 4-8:-fw version */ qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4, (int *)&ver); else qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, 
(int *)&ver); ver = QLCNIC_DECODE_VERSION(ver); min_ver = QLCNIC_MIN_FW_VERSION; if (ver < min_ver) { dev_err(&adapter->pdev->dev, "firmware version %d.%d.%d unsupported." "Min supported version %d.%d.%d\n", _major(ver), _minor(ver), _build(ver), _major(min_ver), _minor(min_ver), _build(min_ver)); return -EINVAL; } return 0; } static int qlcnic_has_mn(struct qlcnic_adapter *adapter) { u32 capability = 0; int err = 0; capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY, &err); if (err == -EIO) return err; if (capability & QLCNIC_PEG_TUNE_MN_PRESENT) return 1; return 0; } static struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section) { u32 i, entries; struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; entries = le32_to_cpu(directory->num_entries); for (i = 0; i < entries; i++) { u32 offs = le32_to_cpu(directory->findex) + i * le32_to_cpu(directory->entry_size); u32 tab_type = le32_to_cpu(*((__le32 *)&unirom[offs] + 8)); if (tab_type == section) return (struct uni_table_desc *) &unirom[offs]; } return NULL; } #define FILEHEADER_SIZE (14 * 4) static int qlcnic_validate_header(struct qlcnic_adapter *adapter) { const u8 *unirom = adapter->fw->data; struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; u32 entries, entry_size, tab_size, fw_file_size; fw_file_size = adapter->fw->size; if (fw_file_size < FILEHEADER_SIZE) return -EINVAL; entries = le32_to_cpu(directory->num_entries); entry_size = le32_to_cpu(directory->entry_size); tab_size = le32_to_cpu(directory->findex) + (entries * entry_size); if (fw_file_size < tab_size) return -EINVAL; return 0; } static int qlcnic_validate_bootld(struct qlcnic_adapter *adapter) { struct uni_table_desc *tab_desc; struct uni_data_desc *descr; u32 offs, tab_size, data_size, idx; const u8 *unirom = adapter->fw->data; __le32 temp; temp = *((__le32 *)&unirom[adapter->file_prd_off] + QLCNIC_UNI_BOOTLD_IDX_OFF); idx = le32_to_cpu(temp); tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_BOOTLD); if (!tab_desc) return -EINVAL; tab_size = le32_to_cpu(tab_desc->findex) + le32_to_cpu(tab_desc->entry_size) * (idx + 1); if (adapter->fw->size < tab_size) return -EINVAL; offs = le32_to_cpu(tab_desc->findex) + le32_to_cpu(tab_desc->entry_size) * idx; descr = (struct uni_data_desc *)&unirom[offs]; data_size = le32_to_cpu(descr->findex) + le32_to_cpu(descr->size); if (adapter->fw->size < data_size) return -EINVAL; return 0; } static int qlcnic_validate_fw(struct qlcnic_adapter *adapter) { struct uni_table_desc *tab_desc; struct uni_data_desc *descr; const u8 *unirom = adapter->fw->data; u32 offs, tab_size, data_size, idx; __le32 temp; temp = *((__le32 *)&unirom[adapter->file_prd_off] + QLCNIC_UNI_FIRMWARE_IDX_OFF); idx = le32_to_cpu(temp); tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_FW); if (!tab_desc) return -EINVAL; tab_size = le32_to_cpu(tab_desc->findex) + le32_to_cpu(tab_desc->entry_size) * (idx + 1); if (adapter->fw->size < tab_size) return -EINVAL; offs = le32_to_cpu(tab_desc->findex) + le32_to_cpu(tab_desc->entry_size) * idx; descr = (struct uni_data_desc *)&unirom[offs]; data_size = le32_to_cpu(descr->findex) + le32_to_cpu(descr->size); if (adapter->fw->size < data_size) return -EINVAL; return 0; } static int qlcnic_validate_product_offs(struct qlcnic_adapter *adapter) { struct uni_table_desc *ptab_descr; const u8 *unirom = adapter->fw->data; int mn_present = qlcnic_has_mn(adapter); u32 entries, entry_size, tab_size, i; __le32 temp; ptab_descr = qlcnic_get_table_desc(unirom, 
QLCNIC_UNI_DIR_SECT_PRODUCT_TBL); if (!ptab_descr) return -EINVAL; entries = le32_to_cpu(ptab_descr->num_entries); entry_size = le32_to_cpu(ptab_descr->entry_size); tab_size = le32_to_cpu(ptab_descr->findex) + (entries * entry_size); if (adapter->fw->size < tab_size) return -EINVAL; nomn: for (i = 0; i < entries; i++) { u32 flags, file_chiprev, offs; u8 chiprev = adapter->ahw->revision_id; u32 flagbit; offs = le32_to_cpu(ptab_descr->findex) + i * le32_to_cpu(ptab_descr->entry_size); temp = *((__le32 *)&unirom[offs] + QLCNIC_UNI_FLAGS_OFF); flags = le32_to_cpu(temp); temp = *((__le32 *)&unirom[offs] + QLCNIC_UNI_CHIP_REV_OFF); file_chiprev = le32_to_cpu(temp); flagbit = mn_present ? 1 : 2; if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) { adapter->file_prd_off = offs; return 0; } } if (mn_present) { mn_present = 0; goto nomn; } return -EINVAL; } static int qlcnic_validate_unified_romimage(struct qlcnic_adapter *adapter) { if (qlcnic_validate_header(adapter)) { dev_err(&adapter->pdev->dev, "unified image: header validation failed\n"); return -EINVAL; } if (qlcnic_validate_product_offs(adapter)) { dev_err(&adapter->pdev->dev, "unified image: product validation failed\n"); return -EINVAL; } if (qlcnic_validate_bootld(adapter)) { dev_err(&adapter->pdev->dev, "unified image: bootld validation failed\n"); return -EINVAL; } if (qlcnic_validate_fw(adapter)) { dev_err(&adapter->pdev->dev, "unified image: firmware validation failed\n"); return -EINVAL; } return 0; } static struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter, u32 section, u32 idx_offset) { const u8 *unirom = adapter->fw->data; struct uni_table_desc *tab_desc; u32 offs, idx; __le32 temp; temp = *((__le32 *)&unirom[adapter->file_prd_off] + idx_offset); idx = le32_to_cpu(temp); tab_desc = qlcnic_get_table_desc(unirom, section); if (tab_desc == NULL) return NULL; offs = le32_to_cpu(tab_desc->findex) + le32_to_cpu(tab_desc->entry_size) * idx; return (struct uni_data_desc *)&unirom[offs]; } static u8 * qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter) { u32 offs = QLCNIC_BOOTLD_START; struct uni_data_desc *data_desc; data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_BOOTLD, QLCNIC_UNI_BOOTLD_IDX_OFF); if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE) offs = le32_to_cpu(data_desc->findex); return (u8 *)&adapter->fw->data[offs]; } static u8 * qlcnic_get_fw_offs(struct qlcnic_adapter *adapter) { u32 offs = QLCNIC_IMAGE_START; struct uni_data_desc *data_desc; data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW, QLCNIC_UNI_FIRMWARE_IDX_OFF); if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE) offs = le32_to_cpu(data_desc->findex); return (u8 *)&adapter->fw->data[offs]; } static u32 qlcnic_get_fw_size(struct qlcnic_adapter *adapter) { struct uni_data_desc *data_desc; const u8 *unirom = adapter->fw->data; data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW, QLCNIC_UNI_FIRMWARE_IDX_OFF); if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE) return le32_to_cpu(data_desc->size); else return le32_to_cpu(*(__le32 *)&unirom[QLCNIC_FW_SIZE_OFFSET]); } static u32 qlcnic_get_fw_version(struct qlcnic_adapter *adapter) { struct uni_data_desc *fw_data_desc; const struct firmware *fw = adapter->fw; u32 major, minor, sub; __le32 version_offset; const u8 *ver_str; int i, ret; if (adapter->ahw->fw_type != QLCNIC_UNIFIED_ROMIMAGE) { version_offset = *(__le32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]; return le32_to_cpu(version_offset); } fw_data_desc = qlcnic_get_data_desc(adapter, 
QLCNIC_UNI_DIR_SECT_FW, QLCNIC_UNI_FIRMWARE_IDX_OFF); ver_str = fw->data + le32_to_cpu(fw_data_desc->findex) + le32_to_cpu(fw_data_desc->size) - 17; for (i = 0; i < 12; i++) { if (!strncmp(&ver_str[i], "REV=", 4)) { ret = sscanf(&ver_str[i+4], "%u.%u.%u ", &major, &minor, &sub); if (ret != 3) return 0; else return major + (minor << 8) + (sub << 16); } } return 0; } static u32 qlcnic_get_bios_version(struct qlcnic_adapter *adapter) { const struct firmware *fw = adapter->fw; u32 bios_ver, prd_off = adapter->file_prd_off; u8 *version_offset; __le32 temp; if (adapter->ahw->fw_type != QLCNIC_UNIFIED_ROMIMAGE) { version_offset = (u8 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]; return le32_to_cpu(*(__le32 *)version_offset); } temp = *((__le32 *)(&fw->data[prd_off]) + QLCNIC_UNI_BIOS_VERSION_OFF); bios_ver = le32_to_cpu(temp); return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24); } static void qlcnic_rom_lock_recovery(struct qlcnic_adapter *adapter) { if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID)) dev_info(&adapter->pdev->dev, "Resetting rom_lock\n"); qlcnic_pcie_sem_unlock(adapter, 2); } static int qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter) { u32 heartbeat, ret = -EIO; int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT; adapter->heartbeat = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); do { msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS); heartbeat = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); if (heartbeat != adapter->heartbeat) { ret = QLCNIC_RCODE_SUCCESS; break; } } while (--retries); return ret; } int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter) { if ((adapter->flags & QLCNIC_FW_HANG) || qlcnic_check_fw_hearbeat(adapter)) { qlcnic_rom_lock_recovery(adapter); return 1; } if (adapter->need_fw_reset) return 1; if (adapter->fw) return 1; return 0; } static const char *fw_name[] = { QLCNIC_UNIFIED_ROMIMAGE_NAME, QLCNIC_FLASH_ROMIMAGE_NAME, }; int qlcnic_load_firmware(struct qlcnic_adapter *adapter) { __le64 *ptr64; u32 i, flashaddr, size; const struct firmware *fw = adapter->fw; struct pci_dev *pdev = adapter->pdev; dev_info(&pdev->dev, "loading firmware from %s\n", fw_name[adapter->ahw->fw_type]); if (fw) { u64 data; size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8; ptr64 = (__le64 *)qlcnic_get_bootld_offs(adapter); flashaddr = QLCNIC_BOOTLD_START; for (i = 0; i < size; i++) { data = le64_to_cpu(ptr64[i]); if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data)) return -EIO; flashaddr += 8; } size = qlcnic_get_fw_size(adapter) / 8; ptr64 = (__le64 *)qlcnic_get_fw_offs(adapter); flashaddr = QLCNIC_IMAGE_START; for (i = 0; i < size; i++) { data = le64_to_cpu(ptr64[i]); if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data)) return -EIO; flashaddr += 8; } size = qlcnic_get_fw_size(adapter) % 8; if (size) { data = le64_to_cpu(ptr64[i]); if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data)) return -EIO; } } else { u64 data; u32 hi, lo; int ret; struct qlcnic_flt_entry bootld_entry; ret = qlcnic_get_flt_entry(adapter, QLCNIC_BOOTLD_REGION, &bootld_entry); if (!ret) { size = bootld_entry.size / 8; flashaddr = bootld_entry.start_addr; } else { size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8; flashaddr = QLCNIC_BOOTLD_START; dev_info(&pdev->dev, "using legacy method to get flash fw region"); } for (i = 0; i < size; i++) { if (qlcnic_rom_fast_read(adapter, flashaddr, (int *)&lo) != 0) return -EIO; if (qlcnic_rom_fast_read(adapter, flashaddr + 4, (int *)&hi) != 0) return -EIO; data = (((u64)hi << 32) | lo); if 
(qlcnic_pci_mem_write_2M(adapter, flashaddr, data)) return -EIO; flashaddr += 8; } } usleep_range(1000, 1500); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020); QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e); return 0; } static int qlcnic_validate_firmware(struct qlcnic_adapter *adapter) { u32 val; u32 ver, bios, min_size; struct pci_dev *pdev = adapter->pdev; const struct firmware *fw = adapter->fw; u8 fw_type = adapter->ahw->fw_type; if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) { if (qlcnic_validate_unified_romimage(adapter)) return -EINVAL; min_size = QLCNIC_UNI_FW_MIN_SIZE; } else { val = le32_to_cpu(*(__le32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]); if (val != QLCNIC_BDINFO_MAGIC) return -EINVAL; min_size = QLCNIC_FW_MIN_SIZE; } if (fw->size < min_size) return -EINVAL; val = qlcnic_get_fw_version(adapter); ver = QLCNIC_DECODE_VERSION(val); if (ver < QLCNIC_MIN_FW_VERSION) { dev_err(&pdev->dev, "%s: firmware version %d.%d.%d unsupported\n", fw_name[fw_type], _major(ver), _minor(ver), _build(ver)); return -EINVAL; } val = qlcnic_get_bios_version(adapter); qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios); if (val != bios) { dev_err(&pdev->dev, "%s: firmware bios is incompatible\n", fw_name[fw_type]); return -EINVAL; } QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, QLCNIC_BDINFO_MAGIC); return 0; } static void qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter) { u8 fw_type; switch (adapter->ahw->fw_type) { case QLCNIC_UNKNOWN_ROMIMAGE: fw_type = QLCNIC_UNIFIED_ROMIMAGE; break; case QLCNIC_UNIFIED_ROMIMAGE: default: fw_type = QLCNIC_FLASH_ROMIMAGE; break; } adapter->ahw->fw_type = fw_type; } void qlcnic_request_firmware(struct qlcnic_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; int rc; adapter->ahw->fw_type = QLCNIC_UNKNOWN_ROMIMAGE; next: qlcnic_get_next_fwtype(adapter); if (adapter->ahw->fw_type == QLCNIC_FLASH_ROMIMAGE) { adapter->fw = NULL; } else { rc = request_firmware(&adapter->fw, fw_name[adapter->ahw->fw_type], &pdev->dev); if (rc != 0) goto next; rc = qlcnic_validate_firmware(adapter); if (rc != 0) { release_firmware(adapter->fw); usleep_range(1000, 1500); goto next; } } } void qlcnic_release_firmware(struct qlcnic_adapter *adapter) { release_firmware(adapter->fw); adapter->fw = NULL; }
linux-master
drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
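The qlcnic_init.c block above decodes CRB addresses by splitting the 32-bit address into a 0xfff00000 block base, matching that base against the crb_addr_xform table, and re-attaching the 20-bit offset to the matched table index shifted into the top bits (qlcnic_decode_crb_addr). The standalone C sketch below mirrors only that split-and-lookup pattern; the table contents, the DEMO_* names and the error sentinel are illustrative stand-ins, not the driver's real definitions.

/* Sketch of the base/offset split used by qlcnic_decode_crb_addr();
 * demo_xform[] is a stand-in, not the real crb_addr_xform[] table.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ADDR_ERROR 0xffffffffU

static const uint32_t demo_xform[] = {
	0x41000000,	/* hypothetical base of CRB block 0 */
	0x42000000,	/* hypothetical base of CRB block 1 */
};

static uint32_t demo_decode_crb_addr(uint32_t addr)
{
	uint32_t base = addr & 0xfff00000;	/* upper 12 bits select the block */
	uint32_t off  = addr & 0x000fffff;	/* lower 20 bits are the offset   */
	unsigned int i;

	for (i = 0; i < sizeof(demo_xform) / sizeof(demo_xform[0]); i++)
		if (demo_xform[i] == base)
			return (i << 20) + off;	/* table index becomes the new base */

	return DEMO_ADDR_ERROR;	/* unknown block, mirrors QLCNIC_ADDR_ERROR */
}

int main(void)
{
	/* 0x42000010 -> block 1, offset 0x10 -> 0x100010 */
	printf("0x%x\n", demo_decode_crb_addr(0x42000010));
	return 0;
}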
// SPDX-License-Identifier: GPL-2.0-only /* * QLogic qlcnic NIC Driver * Copyright (c) 2009-2013 QLogic Corporation */ #include "qlcnic.h" #include "qlcnic_hw.h" static int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *adapter, int lock) { if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } QLCWRX(adapter->ahw, QLC_83XX_VNIC_STATE, QLCNIC_DEV_NPAR_OPER); if (lock) qlcnic_83xx_unlock_driver(adapter); return 0; } int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *adapter, int lock) { struct qlcnic_hardware_context *ahw = adapter->ahw; if (lock) { if (qlcnic_83xx_lock_driver(adapter)) return -EBUSY; } QLCWRX(adapter->ahw, QLC_83XX_VNIC_STATE, QLCNIC_DEV_NPAR_NON_OPER); ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER; if (lock) qlcnic_83xx_unlock_driver(adapter); return 0; } int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *adapter) { u8 id; int ret = -EBUSY; u32 data = QLCNIC_MGMT_FUNC; struct qlcnic_hardware_context *ahw = adapter->ahw; if (qlcnic_83xx_lock_driver(adapter)) return ret; id = ahw->pci_func; data = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE); data = (data & ~QLC_83XX_SET_FUNC_OPMODE(0x3, id)) | QLC_83XX_SET_FUNC_OPMODE(QLCNIC_MGMT_FUNC, id); QLCWRX(adapter->ahw, QLC_83XX_DRV_OP_MODE, data); qlcnic_83xx_unlock_driver(adapter); return 0; } static void qlcnic_83xx_config_vnic_buff_descriptors(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; if (ahw->port_type == QLCNIC_XGBE) { adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF; adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF; adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; } else if (ahw->port_type == QLCNIC_GBE) { adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G; adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G; } adapter->num_txd = MAX_CMD_DESCRIPTORS; adapter->max_rds_rings = MAX_RDS_RINGS; } /** * qlcnic_83xx_init_mgmt_vnic * * @adapter: adapter structure * Management virtual NIC sets the operational mode of other vNIC's and * configures embedded switch (ESWITCH). * Returns: Success(0) or error code. * **/ static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct device *dev = &adapter->pdev->dev; struct qlcnic_npar_info *npar; int i, err = -EIO; qlcnic_83xx_get_minidump_template(adapter); if (!(adapter->flags & QLCNIC_ADAPTER_INITIALIZED)) { if (qlcnic_init_pci_info(adapter)) return err; npar = adapter->npars; for (i = 0; i < ahw->total_nic_func; i++, npar++) { dev_info(dev, "id:%d active:%d type:%d port:%d min_bw:%d max_bw:%d mac_addr:%pM\n", npar->pci_func, npar->active, npar->type, npar->phy_port, npar->min_bw, npar->max_bw, npar->mac); } dev_info(dev, "Max functions = %d, active functions = %d\n", ahw->max_pci_func, ahw->total_nic_func); if (qlcnic_83xx_set_vnic_opmode(adapter)) return err; if (qlcnic_set_default_offload_settings(adapter)) return err; } else { if (qlcnic_reset_npar_config(adapter)) return err; } if (qlcnic_83xx_get_port_info(adapter)) return err; qlcnic_83xx_config_vnic_buff_descriptors(adapter); ahw->msix_supported = qlcnic_use_msi_x ? 
1 : 0; adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; qlcnic_83xx_enable_vnic_mode(adapter, 1); dev_info(dev, "HAL Version: %d, Management function\n", ahw->fw_hal_version); return 0; } static int qlcnic_83xx_init_privileged_vnic(struct qlcnic_adapter *adapter) { int err = -EIO; qlcnic_83xx_get_minidump_template(adapter); if (qlcnic_83xx_get_port_info(adapter)) return err; qlcnic_83xx_config_vnic_buff_descriptors(adapter); adapter->ahw->msix_supported = !!qlcnic_use_msi_x; adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; dev_info(&adapter->pdev->dev, "HAL Version: %d, Privileged function\n", adapter->ahw->fw_hal_version); return 0; } static int qlcnic_83xx_init_non_privileged_vnic(struct qlcnic_adapter *adapter) { int err = -EIO; qlcnic_83xx_get_fw_version(adapter); if (qlcnic_set_eswitch_port_config(adapter)) return err; if (qlcnic_83xx_get_port_info(adapter)) return err; qlcnic_83xx_config_vnic_buff_descriptors(adapter); adapter->ahw->msix_supported = !!qlcnic_use_msi_x; adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; dev_info(&adapter->pdev->dev, "HAL Version: %d, Virtual function\n", adapter->ahw->fw_hal_version); return 0; } /** * qlcnic_83xx_config_vnic_opmode * * @adapter: adapter structure * Identify virtual NIC operational modes. * * Returns: Success(0) or error code. * **/ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter) { u32 op_mode, priv_level; struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_nic_template *nic_ops = adapter->nic_ops; qlcnic_get_func_no(adapter); op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE); if (op_mode == QLC_83XX_DEFAULT_OPMODE) priv_level = QLCNIC_MGMT_FUNC; else priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode, ahw->pci_func); switch (priv_level) { case QLCNIC_NON_PRIV_FUNC: ahw->op_mode = QLCNIC_NON_PRIV_FUNC; ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic; break; case QLCNIC_PRIV_FUNC: ahw->op_mode = QLCNIC_PRIV_FUNC; ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry; nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic; break; case QLCNIC_MGMT_FUNC: ahw->op_mode = QLCNIC_MGMT_FUNC; ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic; break; default: dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n"); return -EIO; } if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY) { adapter->flags |= QLCNIC_ESWITCH_ENABLED; if (adapter->drv_mac_learn) adapter->rx_mac_learn = true; } else { adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; adapter->rx_mac_learn = false; } ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER; ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO; return 0; } int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlc_83xx_idc *idc = &ahw->idc; u32 state; state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit) { idc->vnic_wait_limit--; msleep(1000); state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); } if (state != QLCNIC_DEV_NPAR_OPER) { dev_err(&adapter->pdev->dev, "vNIC mode not operational, state check timed out.\n"); return -EIO; } return 0; } int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *adapter, int func, int *port_id) { struct qlcnic_info nic_info; int err = 0; memset(&nic_info, 0, sizeof(struct qlcnic_info)); err = qlcnic_get_nic_info(adapter, &nic_info, func); if (err) return err; if (nic_info.capabilities & QLC_83XX_ESWITCH_CAPABILITY) 
*port_id = nic_info.phys_port; else err = -EIO; if (!err) adapter->eswitch[*port_id].flags |= QLCNIC_SWITCH_ENABLE; return err; }
linux-master
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
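qlcnic_83xx_set_vnic_opmode() and qlcnic_83xx_config_vnic_opmode() above read-modify-write QLC_83XX_DRV_OP_MODE as a word of small per-function privilege fields; the 0x3 mask passed to QLC_83XX_SET_FUNC_OPMODE implies each PCI function owns a 2-bit field. The sketch below illustrates that packing under the assumption that function N's field sits at bit offset 2*N; the DEMO_* macros and the value 0x2 are hypothetical, not the driver's real macros or the real QLCNIC_MGMT_FUNC value.

/* Per-function 2-bit privilege fields packed into one 32-bit word,
 * mirroring the mask/shift pattern implied by the driver's
 * QLC_83XX_SET_FUNC_OPMODE()/QLC_83XX_GET_FUNC_PRIVILEGE() usage.
 * Field placement (2 bits at offset 2*func) is an assumption here.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_FUNC_FIELD(val, func)	(((uint32_t)(val) & 0x3) << ((func) * 2))
#define DEMO_GET_FUNC(word, func)	(((word) >> ((func) * 2)) & 0x3)

static uint32_t demo_set_opmode(uint32_t word, unsigned int func, uint32_t mode)
{
	word &= ~DEMO_FUNC_FIELD(0x3, func);	/* clear this function's field */
	word |= DEMO_FUNC_FIELD(mode, func);	/* install the new privilege   */
	return word;
}

int main(void)
{
	uint32_t op_mode = 0;

	op_mode = demo_set_opmode(op_mode, 3, 0x2);	/* 0x2: hypothetical level */
	printf("func 3 privilege = %u\n", DEMO_GET_FUNC(op_mode, 3));	/* prints 2 */
	return 0;
}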
// SPDX-License-Identifier: GPL-2.0-only /* * New driver for Marvell Yukon 2 chipset. * Based on earlier sk98lin, and skge driver. * * This driver intentionally does not support all the features * of the original driver such as link fail-over and link management because * those should be done at higher levels. * * Copyright (C) 2005 Stephen Hemminger <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/crc32.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/dma-mapping.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/ip.h> #include <linux/slab.h> #include <net/ip.h> #include <linux/tcp.h> #include <linux/in.h> #include <linux/delay.h> #include <linux/workqueue.h> #include <linux/if_vlan.h> #include <linux/prefetch.h> #include <linux/debugfs.h> #include <linux/mii.h> #include <linux/of_net.h> #include <linux/dmi.h> #include <asm/irq.h> #include "sky2.h" #define DRV_NAME "sky2" #define DRV_VERSION "1.30" /* * The Yukon II chipset takes 64 bit command blocks (called list elements) * that are organized into three (receive, transmit, status) different rings * similar to Tigon3. */ #define RX_LE_SIZE 1024 #define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le)) #define RX_MAX_PENDING (RX_LE_SIZE/6 - 2) #define RX_DEF_PENDING RX_MAX_PENDING /* This is the worst case number of transmit list elements for a single skb: * VLAN:GSO + CKSUM + Data + skb_frags * DMA */ #define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1)) #define TX_MIN_PENDING (MAX_SKB_TX_LE+1) #define TX_MAX_PENDING 1024 #define TX_DEF_PENDING 63 #define TX_WATCHDOG (5 * HZ) #define PHY_RETRIES 1000 #define SKY2_EEPROM_MAGIC 0x9955aabb #define RING_NEXT(x, s) (((x)+1) & ((s)-1)) static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; static int debug = -1; /* defaults above */ module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); static int copybreak __read_mostly = 128; module_param(copybreak, int, 0); MODULE_PARM_DESC(copybreak, "Receive copy threshold"); static int disable_msi = -1; module_param(disable_msi, int, 0); MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); static int legacy_pme = 0; module_param(legacy_pme, int, 0); MODULE_PARM_DESC(legacy_pme, "Legacy power management"); static const struct pci_device_id sky2_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E01) }, /* SK-9E21M */ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B03) }, /* DGE-550T */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) }, /* 88E8062 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) }, /* 88E8021 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) }, /* 88E8022 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) }, /* 88E8061 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) }, /* 88E8062 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 
0x4350) }, /* 88E8035 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4355) }, /* 88E8040T */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4357) }, /* 88E8042 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) }, /* 88E8070 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */ { 0 } }; MODULE_DEVICE_TABLE(pci, sky2_id_table); /* Avoid conditionals by using array */ static const unsigned txqaddr[] = { Q_XA1, Q_XA2 }; static const unsigned rxqaddr[] = { Q_R1, Q_R2 }; static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 }; static void sky2_set_multicast(struct net_device *dev); static irqreturn_t sky2_intr(int irq, void *dev_id); /* Access to PHY via serial interconnect */ static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val) { int i; gma_write16(hw, port, GM_SMI_DATA, val); gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg)); for (i = 0; i < PHY_RETRIES; i++) { u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL); if (ctrl == 0xffff) goto io_error; if (!(ctrl & GM_SMI_CT_BUSY)) return 0; udelay(10); } dev_warn(&hw->pdev->dev, "%s: phy write timeout\n", hw->dev[port]->name); return -ETIMEDOUT; io_error: dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name); return -EIO; } static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val) { int i; gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); for (i = 0; i < PHY_RETRIES; i++) { u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL); if (ctrl == 0xffff) goto io_error; if (ctrl & GM_SMI_CT_RD_VAL) { *val = gma_read16(hw, port, GM_SMI_DATA); return 0; } udelay(10); } dev_warn(&hw->pdev->dev, "%s: phy read timeout\n", hw->dev[port]->name); return -ETIMEDOUT; io_error: dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name); return -EIO; } static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg) { u16 v = 0; __gm_phy_read(hw, port, reg, &v); return v; } static void sky2_power_on(struct sky2_hw *hw) { /* switch power to VCC (WA for VAUX problem) */ sky2_write8(hw, B0_POWER_CTRL, PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); /* disable Core Clock Division, */ 
sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS); if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1) /* enable bits are inverted */ sky2_write8(hw, B2_Y2_CLK_GATE, Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS); else sky2_write8(hw, B2_Y2_CLK_GATE, 0); if (hw->flags & SKY2_HW_ADV_POWER_CTL) { u32 reg; sky2_pci_write32(hw, PCI_DEV_REG3, 0); reg = sky2_pci_read32(hw, PCI_DEV_REG4); /* set all bits to 0 except bits 15..12 and 8 */ reg &= P_ASPM_CONTROL_MSK; sky2_pci_write32(hw, PCI_DEV_REG4, reg); reg = sky2_pci_read32(hw, PCI_DEV_REG5); /* set all bits to 0 except bits 28 & 27 */ reg &= P_CTL_TIM_VMAIN_AV_MSK; sky2_pci_write32(hw, PCI_DEV_REG5, reg); sky2_pci_write32(hw, PCI_CFG_REG_1, 0); sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON); /* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */ reg = sky2_read32(hw, B2_GP_IO); reg |= GLB_GPIO_STAT_RACE_DIS; sky2_write32(hw, B2_GP_IO, reg); sky2_read32(hw, B2_GP_IO); } /* Turn on "driver loaded" LED */ sky2_write16(hw, B0_CTST, Y2_LED_STAT_ON); } static void sky2_power_aux(struct sky2_hw *hw) { if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1) sky2_write8(hw, B2_Y2_CLK_GATE, 0); else /* enable bits are inverted */ sky2_write8(hw, B2_Y2_CLK_GATE, Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS); /* switch power to VAUX if supported and PME from D3cold */ if ( (sky2_read32(hw, B0_CTST) & Y2_VAUX_AVAIL) && pci_pme_capable(hw->pdev, PCI_D3cold)) sky2_write8(hw, B0_POWER_CTRL, (PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF)); /* turn off "driver loaded LED" */ sky2_write16(hw, B0_CTST, Y2_LED_STAT_OFF); } static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port) { u16 reg; /* disable all GMAC IRQ's */ sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */ gma_write16(hw, port, GM_MC_ADDR_H2, 0); gma_write16(hw, port, GM_MC_ADDR_H3, 0); gma_write16(hw, port, GM_MC_ADDR_H4, 0); reg = gma_read16(hw, port, GM_RX_CTRL); reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA; gma_write16(hw, port, GM_RX_CTRL, reg); } /* flow control to advertise bits */ static const u16 copper_fc_adv[] = { [FC_NONE] = 0, [FC_TX] = PHY_M_AN_ASP, [FC_RX] = PHY_M_AN_PC, [FC_BOTH] = PHY_M_AN_PC | PHY_M_AN_ASP, }; /* flow control to advertise bits when using 1000BaseX */ static const u16 fiber_fc_adv[] = { [FC_NONE] = PHY_M_P_NO_PAUSE_X, [FC_TX] = PHY_M_P_ASYM_MD_X, [FC_RX] = PHY_M_P_SYM_MD_X, [FC_BOTH] = PHY_M_P_BOTH_MD_X, }; /* flow control to GMA disable bits */ static const u16 gm_fc_disable[] = { [FC_NONE] = GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS, [FC_TX] = GM_GPCR_FC_RX_DIS, [FC_RX] = GM_GPCR_FC_TX_DIS, [FC_BOTH] = 0, }; static void sky2_phy_init(struct sky2_hw *hw, unsigned port) { struct sky2_port *sky2 = netdev_priv(hw->dev[port]); u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg; if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) && !(hw->flags & SKY2_HW_NEWER_PHY)) { u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | PHY_M_EC_MAC_S_MSK); ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); /* on PHY 88E1040 Rev.D0 (and newer) downshift control changed */ if (hw->chip_id == CHIP_ID_YUKON_EC) /* set downshift counter to 3x and enable downshift */ ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA; else /* set master & slave downshift counter to 1x */ ectrl |= PHY_M_EC_M_DSC(0) | 
PHY_M_EC_S_DSC(1); gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl); } ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); if (sky2_is_copper(hw)) { if (!(hw->flags & SKY2_HW_GIGABIT)) { /* enable automatic crossover */ ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1; if (hw->chip_id == CHIP_ID_YUKON_FE_P && hw->chip_rev == CHIP_REV_YU_FE2_A0) { u16 spec; /* Enable Class A driver for FE+ A0 */ spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2); spec |= PHY_M_FESC_SEL_CL_A; gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec); } } else { /* disable energy detect */ ctrl &= ~PHY_M_PC_EN_DET_MSK; /* enable automatic crossover */ ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO); /* downshift on PHY 88E1112 and 88E1149 is changed */ if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) && (hw->flags & SKY2_HW_NEWER_PHY)) { /* set downshift counter to 3x and enable downshift */ ctrl &= ~PHY_M_PC_DSC_MSK; ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA; } } } else { /* workaround for deviation #4.88 (CRC errors) */ /* disable Automatic Crossover */ ctrl &= ~PHY_M_PC_MDIX_MSK; } gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); /* special setup for PHY 88E1112 Fiber */ if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) { pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2); ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); ctrl &= ~PHY_M_MAC_MD_MSK; ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX); gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); if (hw->pmd_type == 'P') { /* select page 1 to access Fiber registers */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1); /* for SFP-module set SIGDET polarity to low */ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); ctrl |= PHY_M_FIB_SIGD_POL; gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); } gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); } ctrl = PHY_CT_RESET; ct1000 = 0; adv = PHY_AN_CSMA; reg = 0; if (sky2->flags & SKY2_FLAG_AUTO_SPEED) { if (sky2_is_copper(hw)) { if (sky2->advertising & ADVERTISED_1000baseT_Full) ct1000 |= PHY_M_1000C_AFD; if (sky2->advertising & ADVERTISED_1000baseT_Half) ct1000 |= PHY_M_1000C_AHD; if (sky2->advertising & ADVERTISED_100baseT_Full) adv |= PHY_M_AN_100_FD; if (sky2->advertising & ADVERTISED_100baseT_Half) adv |= PHY_M_AN_100_HD; if (sky2->advertising & ADVERTISED_10baseT_Full) adv |= PHY_M_AN_10_FD; if (sky2->advertising & ADVERTISED_10baseT_Half) adv |= PHY_M_AN_10_HD; } else { /* special defines for FIBER (88E1040S only) */ if (sky2->advertising & ADVERTISED_1000baseT_Full) adv |= PHY_M_AN_1000X_AFD; if (sky2->advertising & ADVERTISED_1000baseT_Half) adv |= PHY_M_AN_1000X_AHD; } /* Restart Auto-negotiation */ ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG; } else { /* forced speed/duplex settings */ ct1000 = PHY_M_1000C_MSE; /* Disable auto update for duplex flow control and duplex */ reg |= GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_SPD_DIS; switch (sky2->speed) { case SPEED_1000: ctrl |= PHY_CT_SP1000; reg |= GM_GPCR_SPEED_1000; break; case SPEED_100: ctrl |= PHY_CT_SP100; reg |= GM_GPCR_SPEED_100; break; } if (sky2->duplex == DUPLEX_FULL) { reg |= GM_GPCR_DUP_FULL; ctrl |= PHY_CT_DUP_MD; } else if (sky2->speed < SPEED_1000) sky2->flow_mode = FC_NONE; } if (sky2->flags & SKY2_FLAG_AUTO_PAUSE) { if (sky2_is_copper(hw)) adv |= copper_fc_adv[sky2->flow_mode]; else adv |= fiber_fc_adv[sky2->flow_mode]; } else { reg |= GM_GPCR_AU_FCT_DIS; reg |= gm_fc_disable[sky2->flow_mode]; /* Forward pause packets to GMAC? 
*/ if (sky2->flow_mode & FC_RX) sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON); else sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); } gma_write16(hw, port, GM_GP_CTRL, reg); if (hw->flags & SKY2_HW_GIGABIT) gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000); gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); /* Setup Phy LED's */ ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS); ledover = 0; switch (hw->chip_id) { case CHIP_ID_YUKON_FE: /* on 88E3082 these bits are at 11..9 (shifted left) */ ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1; ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR); /* delete ACT LED control bits */ ctrl &= ~PHY_M_FELP_LED1_MSK; /* change ACT LED control to blink mode */ ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL); gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl); break; case CHIP_ID_YUKON_FE_P: /* Enable Link Partner Next Page */ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); ctrl |= PHY_M_PC_ENA_LIP_NP; /* disable Energy Detect and enable scrambler */ ctrl &= ~(PHY_M_PC_ENA_ENE_DT | PHY_M_PC_DIS_SCRAMB); gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); /* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */ ctrl = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_ACT_BL) | PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_LINK) | PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED); gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl); break; case CHIP_ID_YUKON_XL: pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); /* select page 3 to access LED control register */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); /* set LED Function Control register */ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */ PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */ PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */ /* set Polarity Control register */ gm_phy_write(hw, port, PHY_MARV_PHY_STAT, (PHY_M_POLC_LS1_P_MIX(4) | PHY_M_POLC_IS0_P_MIX(4) | PHY_M_POLC_LOS_CTRL(2) | PHY_M_POLC_INIT_CTRL(2) | PHY_M_POLC_STA1_CTRL(2) | PHY_M_POLC_STA0_CTRL(2))); /* restore page register */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); break; case CHIP_ID_YUKON_EC_U: case CHIP_ID_YUKON_EX: case CHIP_ID_YUKON_SUPR: pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); /* select page 3 to access LED control register */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); /* set LED Function Control register */ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */ PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */ PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */ /* set Blink Rate in LED Timer Control Register */ gm_phy_write(hw, port, PHY_MARV_INT_MASK, ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS)); /* restore page register */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); break; default: /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */ ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL; /* turn off the Rx LED (LED_RX) */ ledover |= PHY_M_LED_MO_RX(MO_LED_OFF); } if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_UL_2) { /* apply fixes in PHY AFE */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255); /* increase differential signal amplitude in 10BASE-T */ gm_phy_write(hw, port, 0x18, 0xaa99); gm_phy_write(hw, port, 0x17, 0x2011); if (hw->chip_id == CHIP_ID_YUKON_EC_U) { /* fix for IEEE A/B Symmetry failure in 1000BASE-T */ gm_phy_write(hw, port, 0x18, 0xa204); gm_phy_write(hw, port, 0x17, 0x2002); } /* set page register to 0 */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); } 
else if (hw->chip_id == CHIP_ID_YUKON_FE_P && hw->chip_rev == CHIP_REV_YU_FE2_A0) { /* apply workaround for integrated resistors calibration */ gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17); gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60); } else if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) { /* apply fixes in PHY AFE */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff); /* apply RDAC termination workaround */ gm_phy_write(hw, port, 24, 0x2800); gm_phy_write(hw, port, 23, 0x2001); /* set page register back to 0 */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); } else if (hw->chip_id != CHIP_ID_YUKON_EX && hw->chip_id < CHIP_ID_YUKON_SUPR) { /* no effect on Yukon-XL */ gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); if (!(sky2->flags & SKY2_FLAG_AUTO_SPEED) || sky2->speed == SPEED_100) { /* turn on 100 Mbps LED (LED_LINK100) */ ledover |= PHY_M_LED_MO_100(MO_LED_ON); } if (ledover) gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); } else if (hw->chip_id == CHIP_ID_YUKON_PRM && (sky2_read8(hw, B2_MAC_CFG) & 0xf) == 0x7) { int i; /* This a phy register setup workaround copied from vendor driver. */ static const struct { u16 reg, val; } eee_afe[] = { { 0x156, 0x58ce }, { 0x153, 0x99eb }, { 0x141, 0x8064 }, /* { 0x155, 0x130b },*/ { 0x000, 0x0000 }, { 0x151, 0x8433 }, { 0x14b, 0x8c44 }, { 0x14c, 0x0f90 }, { 0x14f, 0x39aa }, /* { 0x154, 0x2f39 },*/ { 0x14d, 0xba33 }, { 0x144, 0x0048 }, { 0x152, 0x2010 }, /* { 0x158, 0x1223 },*/ { 0x140, 0x4444 }, { 0x154, 0x2f3b }, { 0x158, 0xb203 }, { 0x157, 0x2029 }, }; /* Start Workaround for OptimaEEE Rev.Z0 */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fb); gm_phy_write(hw, port, 1, 0x4099); gm_phy_write(hw, port, 3, 0x1120); gm_phy_write(hw, port, 11, 0x113c); gm_phy_write(hw, port, 14, 0x8100); gm_phy_write(hw, port, 15, 0x112a); gm_phy_write(hw, port, 17, 0x1008); gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fc); gm_phy_write(hw, port, 1, 0x20b0); gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff); for (i = 0; i < ARRAY_SIZE(eee_afe); i++) { /* apply AFE settings */ gm_phy_write(hw, port, 17, eee_afe[i].val); gm_phy_write(hw, port, 16, eee_afe[i].reg | 1u<<13); } /* End Workaround for OptimaEEE */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); /* Enable 10Base-Te (EEE) */ if (hw->chip_id >= CHIP_ID_YUKON_PRM) { reg = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, reg | PHY_M_10B_TE_ENABLE); } } /* Enable phy interrupt on auto-negotiation complete (or link up) */ if (sky2->flags & SKY2_FLAG_AUTO_SPEED) gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL); else gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); } static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD }; static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA }; static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port) { u32 reg1; sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); reg1 &= ~phy_power[port]; if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1) reg1 |= coma_mode[port]; sky2_pci_write32(hw, PCI_DEV_REG1, reg1); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); sky2_pci_read32(hw, PCI_DEV_REG1); if (hw->chip_id == CHIP_ID_YUKON_FE) gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE); else if (hw->flags & SKY2_HW_ADV_POWER_CTL) sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); } static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port) { u32 reg1; u16 ctrl; /* release GPHY Control reset */ sky2_write8(hw, 
SK_REG(port, GPHY_CTRL), GPC_RST_CLR); /* release GMAC reset */ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); if (hw->flags & SKY2_HW_NEWER_PHY) { /* select page 2 to access MAC control register */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2); ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); /* allow GMII Power Down */ ctrl &= ~PHY_M_MAC_GMIF_PUP; gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); /* set page register back to 0 */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); } /* setup General Purpose Control Register */ gma_write16(hw, port, GM_GP_CTRL, GM_GPCR_FL_PASS | GM_GPCR_SPEED_100 | GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS | GM_GPCR_AU_SPD_DIS); if (hw->chip_id != CHIP_ID_YUKON_EC) { if (hw->chip_id == CHIP_ID_YUKON_EC_U) { /* select page 2 to access MAC control register */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2); ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); /* enable Power Down */ ctrl |= PHY_M_PC_POW_D_ENA; gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); /* set page register back to 0 */ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); } /* set IEEE compatible Power Down Mode (dev. #4.99) */ gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN); } sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */ sky2_pci_write32(hw, PCI_DEV_REG1, reg1); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); } /* configure IPG according to used link speed */ static void sky2_set_ipg(struct sky2_port *sky2) { u16 reg; reg = gma_read16(sky2->hw, sky2->port, GM_SERIAL_MODE); reg &= ~GM_SMOD_IPG_MSK; if (sky2->speed > SPEED_100) reg |= IPG_DATA_VAL(IPG_DATA_DEF_1000); else reg |= IPG_DATA_VAL(IPG_DATA_DEF_10_100); gma_write16(sky2->hw, sky2->port, GM_SERIAL_MODE, reg); } /* Enable Rx/Tx */ static void sky2_enable_rx_tx(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; u16 reg; reg = gma_read16(hw, port, GM_GP_CTRL); reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; gma_write16(hw, port, GM_GP_CTRL, reg); } /* Force a renegotiation */ static void sky2_phy_reinit(struct sky2_port *sky2) { spin_lock_bh(&sky2->phy_lock); sky2_phy_init(sky2->hw, sky2->port); sky2_enable_rx_tx(sky2); spin_unlock_bh(&sky2->phy_lock); } /* Put device in state to listen for Wake On Lan */ static void sky2_wol_init(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; enum flow_control save_mode; u16 ctrl; /* Bring hardware out of reset */ sky2_write16(hw, B0_CTST, CS_RST_CLR); sky2_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR); sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); /* Force to 10/100 * sky2_reset will re-enable on resume */ save_mode = sky2->flow_mode; ctrl = sky2->advertising; sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full); sky2->flow_mode = FC_NONE; spin_lock_bh(&sky2->phy_lock); sky2_phy_power_up(hw, port); sky2_phy_init(hw, port); spin_unlock_bh(&sky2->phy_lock); sky2->flow_mode = save_mode; sky2->advertising = ctrl; /* Set GMAC to no flow control and auto update for speed/duplex */ gma_write16(hw, port, GM_GP_CTRL, GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA| GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS); /* Set WOL address */ memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR), sky2->netdev->dev_addr, ETH_ALEN); /* Turn on appropriate WOL control bits */ sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT); ctrl = 0; if 
(sky2->wol & WAKE_PHY) ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT; else ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT; if (sky2->wol & WAKE_MAGIC) ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT; else ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT; ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT; sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl); /* Disable PiG firmware */ sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF); /* Needed by some broken BIOSes, use PCI rather than PCI-e for WOL */ if (legacy_pme) { u32 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); reg1 |= PCI_Y2_PME_LEGACY; sky2_pci_write32(hw, PCI_DEV_REG1, reg1); } /* block receiver */ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); sky2_read32(hw, B0_CTST); } static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port) { struct net_device *dev = hw->dev[port]; if ( (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev != CHIP_REV_YU_EX_A0) || hw->chip_id >= CHIP_ID_YUKON_FE_P) { /* Yukon-Extreme B0 and further Extreme devices */ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA); } else if (dev->mtu > ETH_DATA_LEN) { /* set Tx GMAC FIFO Almost Empty Threshold */ sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), (ECU_JUMBO_WM << 16) | ECU_AE_THR); sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS); } else sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA); } static void sky2_mac_init(struct sky2_hw *hw, unsigned port) { struct sky2_port *sky2 = netdev_priv(hw->dev[port]); u16 reg; u32 rx_reg; int i; const u8 *addr = hw->dev[port]->dev_addr; sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == CHIP_REV_YU_XL_A0 && port == 1) { /* WA DEV_472 -- looks like crossed wires on port 2 */ /* clear GMAC 1 Control reset */ sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR); do { sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET); sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR); } while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL || gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 || gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0); } sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC)); /* Enable Transmit FIFO Underrun */ sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); spin_lock_bh(&sky2->phy_lock); sky2_phy_power_up(hw, port); sky2_phy_init(hw, port); spin_unlock_bh(&sky2->phy_lock); /* MIB clear */ reg = gma_read16(hw, port, GM_PHY_ADDR); gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR); for (i = GM_MIB_CNT_BASE; i <= GM_MIB_CNT_END; i += 4) gma_read16(hw, port, i); gma_write16(hw, port, GM_PHY_ADDR, reg); /* transmit control */ gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); /* receive control reg: unicast + multicast + no FCS */ gma_write16(hw, port, GM_RX_CTRL, GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA); /* transmit flow control */ gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff); /* transmit parameter */ gma_write16(hw, port, GM_TX_PARAM, TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF)); /* serial mode register */ reg = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF_1000); if (hw->dev[port]->mtu > ETH_DATA_LEN) reg |= GM_SMOD_JUMBO_ENA; if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == CHIP_REV_YU_EC_U_B1) reg 
|= GM_NEW_FLOW_CTRL; gma_write16(hw, port, GM_SERIAL_MODE, reg); /* virtual address for data */ gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr); /* physical address: used for pause frames */ gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr); /* ignore counter overflows */ gma_write16(hw, port, GM_TX_IRQ_MSK, 0); gma_write16(hw, port, GM_RX_IRQ_MSK, 0); gma_write16(hw, port, GM_TR_IRQ_MSK, 0); /* Configure Rx MAC FIFO */ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON; if (hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_FE_P) rx_reg |= GMF_RX_OVER_ON; sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg); if (hw->chip_id == CHIP_ID_YUKON_XL) { /* Hardware errata - clear flush mask */ sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), 0); } else { /* Flush Rx MAC FIFO on any flow control or error */ sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR); } /* Set threshold to 0xa (64 bytes) + 1 to workaround pause bug */ reg = RX_GMF_FL_THR_DEF + 1; /* Another magic mystery workaround from sk98lin */ if (hw->chip_id == CHIP_ID_YUKON_FE_P && hw->chip_rev == CHIP_REV_YU_FE2_A0) reg = 0x178; sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), reg); /* Configure Tx MAC FIFO */ sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); /* On chips without ram buffer, pause is controlled by MAC level */ if (!(hw->flags & SKY2_HW_RAM_BUFFER)) { /* Pause threshold is scaled by 8 in bytes */ if (hw->chip_id == CHIP_ID_YUKON_FE_P && hw->chip_rev == CHIP_REV_YU_FE2_A0) reg = 1568 / 8; else reg = 1024 / 8; sky2_write16(hw, SK_REG(port, RX_GMF_UP_THR), reg); sky2_write16(hw, SK_REG(port, RX_GMF_LP_THR), 768 / 8); sky2_set_tx_stfwd(hw, port); } if (hw->chip_id == CHIP_ID_YUKON_FE_P && hw->chip_rev == CHIP_REV_YU_FE2_A0) { /* disable dynamic watermark */ reg = sky2_read16(hw, SK_REG(port, TX_GMF_EA)); reg &= ~TX_DYN_WM_ENA; sky2_write16(hw, SK_REG(port, TX_GMF_EA), reg); } } /* Assign Ram Buffer allocation to queue */ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space) { u32 end; /* convert from K bytes to qwords used for hw register */ start *= 1024/8; space *= 1024/8; end = start + space - 1; sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); sky2_write32(hw, RB_ADDR(q, RB_START), start); sky2_write32(hw, RB_ADDR(q, RB_END), end); sky2_write32(hw, RB_ADDR(q, RB_WP), start); sky2_write32(hw, RB_ADDR(q, RB_RP), start); if (q == Q_R1 || q == Q_R2) { u32 tp = space - space/4; /* On receive queue's set the thresholds * give receiver priority when > 3/4 full * send pause when down to 2K */ sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp); sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2); tp = space - 8192/8; sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); } else { /* Enable store & forward on Tx queue's because * Tx FIFO is only 1K on Yukon */ sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD); } sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD); sky2_read8(hw, RB_ADDR(q, RB_CTRL)); } /* Setup Bus Memory Interface */ static void sky2_qset(struct sky2_hw *hw, u16 q) { sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET); sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT); sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON); sky2_write32(hw, Q_ADDR(q, Q_WM), BMU_WM_DEFAULT); } /* Setup prefetch unit registers. 
This is the interface between * hardware and driver list elements */ static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr, dma_addr_t addr, u32 last) { sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR); sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), upper_32_bits(addr)); sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), lower_32_bits(addr)); sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last); sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON); sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL)); } static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot) { struct sky2_tx_le *le = sky2->tx_le + *slot; *slot = RING_NEXT(*slot, sky2->tx_ring_size); le->ctrl = 0; return le; } static void tx_init(struct sky2_port *sky2) { struct sky2_tx_le *le; sky2->tx_prod = sky2->tx_cons = 0; sky2->tx_tcpsum = 0; sky2->tx_last_mss = 0; netdev_reset_queue(sky2->netdev); le = get_tx_le(sky2, &sky2->tx_prod); le->addr = 0; le->opcode = OP_ADDR64 | HW_OWNER; sky2->tx_last_upper = 0; } /* Update chip's next pointer */ static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx) { /* Make sure write' to descriptors are complete before we tell hardware */ wmb(); sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx); } static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2) { struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put; sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE); le->ctrl = 0; return le; } static unsigned sky2_get_rx_threshold(struct sky2_port *sky2) { unsigned size; /* Space needed for frame data + headers rounded up */ size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8); /* Stopping point for hardware truncation */ return (size - 8) / sizeof(u32); } static unsigned sky2_get_rx_data_size(struct sky2_port *sky2) { struct rx_ring_info *re; unsigned size; /* Space needed for frame data + headers rounded up */ size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8); sky2->rx_nfrags = size >> PAGE_SHIFT; BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr)); /* Compute residue after pages */ size -= sky2->rx_nfrags << PAGE_SHIFT; /* Optimize to handle small packets and headers */ if (size < copybreak) size = copybreak; if (size < ETH_HLEN) size = ETH_HLEN; return size; } /* Build description to hardware for one receive segment */ static void sky2_rx_add(struct sky2_port *sky2, u8 op, dma_addr_t map, unsigned len) { struct sky2_rx_le *le; if (sizeof(dma_addr_t) > sizeof(u32)) { le = sky2_next_rx(sky2); le->addr = cpu_to_le32(upper_32_bits(map)); le->opcode = OP_ADDR64 | HW_OWNER; } le = sky2_next_rx(sky2); le->addr = cpu_to_le32(lower_32_bits(map)); le->length = cpu_to_le16(len); le->opcode = op | HW_OWNER; } /* Build description to hardware for one possibly fragmented skb */ static void sky2_rx_submit(struct sky2_port *sky2, const struct rx_ring_info *re) { int i; sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size); for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++) sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE); } static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re, unsigned size) { struct sk_buff *skb = re->skb; int i; re->data_addr = dma_map_single(&pdev->dev, skb->data, size, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, re->data_addr)) goto mapping_error; dma_unmap_len_set(re, data_size, size); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const skb_frag_t *frag = 
&skb_shinfo(skb)->frags[i]; re->frag_addr[i] = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, re->frag_addr[i])) goto map_page_error; } return 0; map_page_error: while (--i >= 0) { dma_unmap_page(&pdev->dev, re->frag_addr[i], skb_frag_size(&skb_shinfo(skb)->frags[i]), DMA_FROM_DEVICE); } dma_unmap_single(&pdev->dev, re->data_addr, dma_unmap_len(re, data_size), DMA_FROM_DEVICE); mapping_error: if (net_ratelimit()) dev_warn(&pdev->dev, "%s: rx mapping error\n", skb->dev->name); return -EIO; } static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re) { struct sk_buff *skb = re->skb; int i; dma_unmap_single(&pdev->dev, re->data_addr, dma_unmap_len(re, data_size), DMA_FROM_DEVICE); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) dma_unmap_page(&pdev->dev, re->frag_addr[i], skb_frag_size(&skb_shinfo(skb)->frags[i]), DMA_FROM_DEVICE); } /* Tell chip where to start receive checksum. * Actually has two checksums, but set both same to avoid possible byte * order problems. */ static void rx_set_checksum(struct sky2_port *sky2) { struct sky2_rx_le *le = sky2_next_rx(sky2); le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN); le->ctrl = 0; le->opcode = OP_TCPSTART | HW_OWNER; sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), (sky2->netdev->features & NETIF_F_RXCSUM) ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); } /* Enable/disable receive hash calculation (RSS) */ static void rx_set_rss(struct net_device *dev, netdev_features_t features) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; int i, nkeys = 4; /* Supports IPv6 and other modes */ if (hw->flags & SKY2_HW_NEW_LE) { nkeys = 10; sky2_write32(hw, SK_REG(sky2->port, RSS_CFG), HASH_ALL); } /* Program RSS initial values */ if (features & NETIF_F_RXHASH) { u32 rss_key[10]; netdev_rss_key_fill(rss_key, sizeof(rss_key)); for (i = 0; i < nkeys; i++) sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4), rss_key[i]); /* Need to turn on (undocumented) flag to make hashing work */ sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_STFW_ENA); sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), BMU_ENA_RX_RSS_HASH); } else sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), BMU_DIS_RX_RSS_HASH); } /* * The RX Stop command will not work for Yukon-2 if the BMU does not * reach the end of packet and since we can't make sure that we have * incoming data, we must reset the BMU while it is not doing a DMA * transfer. Since it is possible that the RX path is still active, * the RX RAM buffer will be stopped first, so any possible incoming * data will not trigger a DMA. After the RAM buffer is stopped, the * BMU is polled until any DMA in progress is ended and only then it * will be reset. 
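* The code below polls two RAM buffer level registers until they match before resetting the BMU and the prefetch unit.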
*/ static void sky2_rx_stop(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; unsigned rxq = rxqaddr[sky2->port]; int i; /* disable the RAM Buffer receive queue */ sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD); for (i = 0; i < 0xffff; i++) if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL)) == sky2_read8(hw, RB_ADDR(rxq, Q_RL))) goto stopped; netdev_warn(sky2->netdev, "receiver stop failed\n"); stopped: sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST); /* reset the Rx prefetch unit */ sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); } /* Clean out receive buffer area, assumes receiver hardware stopped */ static void sky2_rx_clean(struct sky2_port *sky2) { unsigned i; if (sky2->rx_le) memset(sky2->rx_le, 0, RX_LE_BYTES); for (i = 0; i < sky2->rx_pending; i++) { struct rx_ring_info *re = sky2->rx_ring + i; if (re->skb) { sky2_rx_unmap_skb(sky2->hw->pdev, re); kfree_skb(re->skb); re->skb = NULL; } } } /* Basic MII support */ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mii_ioctl_data *data = if_mii(ifr); struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; int err = -EOPNOTSUPP; if (!netif_running(dev)) return -ENODEV; /* Phy still in reset */ switch (cmd) { case SIOCGMIIPHY: data->phy_id = PHY_ADDR_MARV; fallthrough; case SIOCGMIIREG: { u16 val = 0; spin_lock_bh(&sky2->phy_lock); err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val); spin_unlock_bh(&sky2->phy_lock); data->val_out = val; break; } case SIOCSMIIREG: spin_lock_bh(&sky2->phy_lock); err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f, data->val_in); spin_unlock_bh(&sky2->phy_lock); break; } return err; } #define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO) static void sky2_vlan_mode(struct net_device *dev, netdev_features_t features) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; u16 port = sky2->port; if (features & NETIF_F_HW_VLAN_CTAG_RX) sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON); else sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF); if (features & NETIF_F_HW_VLAN_CTAG_TX) { sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON); dev->vlan_features |= SKY2_VLAN_OFFLOADS; } else { sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF); /* Can't do transmit offload of vlan without hw vlan */ dev->vlan_features &= ~SKY2_VLAN_OFFLOADS; } } /* Amount of required worst case padding in rx buffer */ static inline unsigned sky2_rx_pad(const struct sky2_hw *hw) { return (hw->flags & SKY2_HW_RAM_BUFFER) ? 8 : 2; } /* * Allocate an skb for receiving. If the MTU is large enough * make the skb non-linear with a fragment list of pages. */ static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2, gfp_t gfp) { struct sk_buff *skb; int i; skb = __netdev_alloc_skb(sky2->netdev, sky2->rx_data_size + sky2_rx_pad(sky2->hw), gfp); if (!skb) goto nomem; if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) { unsigned char *start; /* * Workaround for a bug in FIFO that cause hang * if the FIFO if the receive buffer is not 64 byte aligned. * The buffer returned from netdev_alloc_skb is * aligned except if slab debugging is enabled. 
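* PTR_ALIGN below rounds the data pointer up to an 8 byte boundary and the difference is reserved as headroom.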
*/ start = PTR_ALIGN(skb->data, 8); skb_reserve(skb, start - skb->data); } else skb_reserve(skb, NET_IP_ALIGN); for (i = 0; i < sky2->rx_nfrags; i++) { struct page *page = alloc_page(gfp); if (!page) goto free_partial; skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE); } return skb; free_partial: kfree_skb(skb); nomem: return NULL; } static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq) { sky2_put_idx(sky2->hw, rxq, sky2->rx_put); } static int sky2_alloc_rx_skbs(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; unsigned i; sky2->rx_data_size = sky2_get_rx_data_size(sky2); /* Fill Rx ring */ for (i = 0; i < sky2->rx_pending; i++) { struct rx_ring_info *re = sky2->rx_ring + i; re->skb = sky2_rx_alloc(sky2, GFP_KERNEL); if (!re->skb) return -ENOMEM; if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) { dev_kfree_skb(re->skb); re->skb = NULL; return -ENOMEM; } } return 0; } /* * Setup receiver buffer pool. * Normal case this ends up creating one list element for skb * in the receive ring. Worst case if using large MTU and each * allocation falls on a different 64 bit region, that results * in 6 list elements per ring entry. * One element is used for checksum enable/disable, and one * extra to avoid wrap. */ static void sky2_rx_start(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; struct rx_ring_info *re; unsigned rxq = rxqaddr[sky2->port]; unsigned i, thresh; sky2->rx_put = sky2->rx_next = 0; sky2_qset(hw, rxq); /* On PCI express lowering the watermark gives better performance */ if (pci_is_pcie(hw->pdev)) sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX); /* These chips have no ram buffer? * MAC Rx RAM Read is controlled by hardware */ if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev > CHIP_REV_YU_EC_U_A0) sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS); sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1); if (!(hw->flags & SKY2_HW_NEW_LE)) rx_set_checksum(sky2); if (!(hw->flags & SKY2_HW_RSS_BROKEN)) rx_set_rss(sky2->netdev, sky2->netdev->features); /* submit Rx ring */ for (i = 0; i < sky2->rx_pending; i++) { re = sky2->rx_ring + i; sky2_rx_submit(sky2, re); } /* * The receiver hangs if it receives frames larger than the * packet buffer. As a workaround, truncate oversize frames, but * the register is limited to 9 bits, so if you do frames > 2052 * you better get the MTU right! */ thresh = sky2_get_rx_threshold(sky2); if (thresh > 0x1ff) sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF); else { sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh); sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON); } /* Tell chip about available buffers */ sky2_rx_update(sky2, rxq); if (hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_SUPR) { /* * Disable flushing of non ASF packets; * must be done after initializing the BMUs; * drivers without ASF support should do this too, otherwise * it may happen that they cannot run on ASF devices; * remember that the MAC FIFO isn't reset during initialization. 
*/ sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_MACSEC_FLUSH_OFF); } if (hw->chip_id >= CHIP_ID_YUKON_SUPR) { /* Enable RX Home Address & Routing Header checksum fix */ sky2_write16(hw, SK_REG(sky2->port, RX_GMF_FL_CTRL), RX_IPV6_SA_MOB_ENA | RX_IPV6_DA_MOB_ENA); /* Enable TX Home Address & Routing Header checksum fix */ sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST), TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN); } } static int sky2_alloc_buffers(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; /* must be power of 2 */ sky2->tx_le = dma_alloc_coherent(&hw->pdev->dev, sky2->tx_ring_size * sizeof(struct sky2_tx_le), &sky2->tx_le_map, GFP_KERNEL); if (!sky2->tx_le) goto nomem; sky2->tx_ring = kcalloc(sky2->tx_ring_size, sizeof(struct tx_ring_info), GFP_KERNEL); if (!sky2->tx_ring) goto nomem; sky2->rx_le = dma_alloc_coherent(&hw->pdev->dev, RX_LE_BYTES, &sky2->rx_le_map, GFP_KERNEL); if (!sky2->rx_le) goto nomem; sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info), GFP_KERNEL); if (!sky2->rx_ring) goto nomem; return sky2_alloc_rx_skbs(sky2); nomem: return -ENOMEM; } static void sky2_free_buffers(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; sky2_rx_clean(sky2); if (sky2->rx_le) { dma_free_coherent(&hw->pdev->dev, RX_LE_BYTES, sky2->rx_le, sky2->rx_le_map); sky2->rx_le = NULL; } if (sky2->tx_le) { dma_free_coherent(&hw->pdev->dev, sky2->tx_ring_size * sizeof(struct sky2_tx_le), sky2->tx_le, sky2->tx_le_map); sky2->tx_le = NULL; } kfree(sky2->tx_ring); kfree(sky2->rx_ring); sky2->tx_ring = NULL; sky2->rx_ring = NULL; } static void sky2_hw_up(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; u32 ramsize; int cap; struct net_device *otherdev = hw->dev[sky2->port^1]; tx_init(sky2); /* * On dual port PCI-X card, there is an problem where status * can be received out of order due to split transactions */ if (otherdev && netif_running(otherdev) && (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) { u16 cmd; cmd = sky2_pci_read16(hw, cap + PCI_X_CMD); cmd &= ~PCI_X_CMD_MAX_SPLIT; sky2_pci_write16(hw, cap + PCI_X_CMD, cmd); } sky2_mac_init(hw, port); /* Register is number of 4K blocks on internal RAM buffer. */ ramsize = sky2_read8(hw, B2_E_0) * 4; if (ramsize > 0) { u32 rxspace; netdev_dbg(sky2->netdev, "ram buffer %dK\n", ramsize); if (ramsize < 16) rxspace = ramsize / 2; else rxspace = 8 + (2*(ramsize - 16))/3; sky2_ramset(hw, rxqaddr[port], 0, rxspace); sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace); /* Make sure SyncQ is disabled */ sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL), RB_RST_SET); } sky2_qset(hw, txqaddr[port]); /* This is copied from sk98lin 10.0.5.3; no one tells me about erratta's */ if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev == CHIP_REV_YU_EX_B0) sky2_write32(hw, Q_ADDR(txqaddr[port], Q_TEST), F_TX_CHK_AUTO_OFF); /* Set almost empty threshold */ if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == CHIP_REV_YU_EC_U_A0) sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV); sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, sky2->tx_ring_size - 1); sky2_vlan_mode(sky2->netdev, sky2->netdev->features); netdev_update_features(sky2->netdev); sky2_rx_start(sky2); } /* Setup device IRQ and enable napi to process */ static int sky2_setup_irq(struct sky2_hw *hw, const char *name) { struct pci_dev *pdev = hw->pdev; int err; err = request_irq(pdev->irq, sky2_intr, (hw->flags & SKY2_HW_USE_MSI) ? 
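/* with MSI the vector is exclusive to this device; legacy INTx may be shared */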
0 : IRQF_SHARED, name, hw); if (err) dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); else { hw->flags |= SKY2_HW_IRQ_SETUP; napi_enable(&hw->napi); sky2_write32(hw, B0_IMSK, Y2_IS_BASE); sky2_read32(hw, B0_IMSK); } return err; } /* Bring up network interface. */ static int sky2_open(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; u32 imask; int err; netif_carrier_off(dev); err = sky2_alloc_buffers(sky2); if (err) goto err_out; /* With single port, IRQ is setup when device is brought up */ if (hw->ports == 1 && (err = sky2_setup_irq(hw, dev->name))) goto err_out; sky2_hw_up(sky2); /* Enable interrupts from phy/mac for port */ imask = sky2_read32(hw, B0_IMSK); if (hw->chip_id == CHIP_ID_YUKON_OPT || hw->chip_id == CHIP_ID_YUKON_PRM || hw->chip_id == CHIP_ID_YUKON_OP_2) imask |= Y2_IS_PHY_QLNK; /* enable PHY Quick Link */ imask |= portirq_msk[port]; sky2_write32(hw, B0_IMSK, imask); sky2_read32(hw, B0_IMSK); netif_info(sky2, ifup, dev, "enabling interface\n"); return 0; err_out: sky2_free_buffers(sky2); return err; } /* Modular subtraction in ring */ static inline int tx_inuse(const struct sky2_port *sky2) { return (sky2->tx_prod - sky2->tx_cons) & (sky2->tx_ring_size - 1); } /* Number of list elements available for next tx */ static inline int tx_avail(const struct sky2_port *sky2) { return sky2->tx_pending - tx_inuse(sky2); } /* Estimate of number of transmit list elements required */ static unsigned tx_le_req(const struct sk_buff *skb) { unsigned count; count = (skb_shinfo(skb)->nr_frags + 1) * (sizeof(dma_addr_t) / sizeof(u32)); if (skb_is_gso(skb)) ++count; else if (sizeof(dma_addr_t) == sizeof(u32)) ++count; /* possible vlan */ if (skb->ip_summed == CHECKSUM_PARTIAL) ++count; return count; } static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re) { if (re->flags & TX_MAP_SINGLE) dma_unmap_single(&pdev->dev, dma_unmap_addr(re, mapaddr), dma_unmap_len(re, maplen), DMA_TO_DEVICE); else if (re->flags & TX_MAP_PAGE) dma_unmap_page(&pdev->dev, dma_unmap_addr(re, mapaddr), dma_unmap_len(re, maplen), DMA_TO_DEVICE); re->flags = 0; } /* * Put one packet in ring for transmit. * A single packet can generate multiple list elements, and * the number of ring elements will probably be less than the number * of list elements used. 
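* tx_le_req() above gives a worst case estimate of the elements needed, which is checked against tx_avail() before anything is written.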
*/ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; struct sky2_tx_le *le = NULL; struct tx_ring_info *re; unsigned i, len; dma_addr_t mapping; u32 upper; u16 slot; u16 mss; u8 ctrl; if (unlikely(tx_avail(sky2) < tx_le_req(skb))) return NETDEV_TX_BUSY; len = skb_headlen(skb); mapping = dma_map_single(&hw->pdev->dev, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(&hw->pdev->dev, mapping)) goto mapping_error; slot = sky2->tx_prod; netif_printk(sky2, tx_queued, KERN_DEBUG, dev, "tx queued, slot %u, len %d\n", slot, skb->len); /* Send high bits if needed */ upper = upper_32_bits(mapping); if (upper != sky2->tx_last_upper) { le = get_tx_le(sky2, &slot); le->addr = cpu_to_le32(upper); sky2->tx_last_upper = upper; le->opcode = OP_ADDR64 | HW_OWNER; } /* Check for TCP Segmentation Offload */ mss = skb_shinfo(skb)->gso_size; if (mss != 0) { if (!(hw->flags & SKY2_HW_NEW_LE)) mss += skb_tcp_all_headers(skb); if (mss != sky2->tx_last_mss) { le = get_tx_le(sky2, &slot); le->addr = cpu_to_le32(mss); if (hw->flags & SKY2_HW_NEW_LE) le->opcode = OP_MSS | HW_OWNER; else le->opcode = OP_LRGLEN | HW_OWNER; sky2->tx_last_mss = mss; } } ctrl = 0; /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */ if (skb_vlan_tag_present(skb)) { if (!le) { le = get_tx_le(sky2, &slot); le->addr = 0; le->opcode = OP_VLAN|HW_OWNER; } else le->opcode |= OP_VLAN; le->length = cpu_to_be16(skb_vlan_tag_get(skb)); ctrl |= INS_VLAN; } /* Handle TCP checksum offload */ if (skb->ip_summed == CHECKSUM_PARTIAL) { /* On Yukon EX (some versions) encoding change. */ if (hw->flags & SKY2_HW_AUTO_TX_SUM) ctrl |= CALSUM; /* auto checksum */ else { const unsigned offset = skb_transport_offset(skb); u32 tcpsum; tcpsum = offset << 16; /* sum start */ tcpsum |= offset + skb->csum_offset; /* sum write */ ctrl |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; if (ip_hdr(skb)->protocol == IPPROTO_UDP) ctrl |= UDPTCP; if (tcpsum != sky2->tx_tcpsum) { sky2->tx_tcpsum = tcpsum; le = get_tx_le(sky2, &slot); le->addr = cpu_to_le32(tcpsum); le->length = 0; /* initial checksum value */ le->ctrl = 1; /* one packet */ le->opcode = OP_TCPLISW | HW_OWNER; } } } re = sky2->tx_ring + slot; re->flags = TX_MAP_SINGLE; dma_unmap_addr_set(re, mapaddr, mapping); dma_unmap_len_set(re, maplen, len); le = get_tx_le(sky2, &slot); le->addr = cpu_to_le32(lower_32_bits(mapping)); le->length = cpu_to_le16(len); le->ctrl = ctrl; le->opcode = mss ? 
(OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (dma_mapping_error(&hw->pdev->dev, mapping)) goto mapping_unwind; upper = upper_32_bits(mapping); if (upper != sky2->tx_last_upper) { le = get_tx_le(sky2, &slot); le->addr = cpu_to_le32(upper); sky2->tx_last_upper = upper; le->opcode = OP_ADDR64 | HW_OWNER; } re = sky2->tx_ring + slot; re->flags = TX_MAP_PAGE; dma_unmap_addr_set(re, mapaddr, mapping); dma_unmap_len_set(re, maplen, skb_frag_size(frag)); le = get_tx_le(sky2, &slot); le->addr = cpu_to_le32(lower_32_bits(mapping)); le->length = cpu_to_le16(skb_frag_size(frag)); le->ctrl = ctrl; le->opcode = OP_BUFFER | HW_OWNER; } re->skb = skb; le->ctrl |= EOP; sky2->tx_prod = slot; if (tx_avail(sky2) <= MAX_SKB_TX_LE) netif_stop_queue(dev); netdev_sent_queue(dev, skb->len); sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod); return NETDEV_TX_OK; mapping_unwind: for (i = sky2->tx_prod; i != slot; i = RING_NEXT(i, sky2->tx_ring_size)) { re = sky2->tx_ring + i; sky2_tx_unmap(hw->pdev, re); } mapping_error: if (net_ratelimit()) dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } /* * Free ring elements from starting at tx_cons until "done" * * NB: * 1. The hardware will tell us about partial completion of multi-part * buffers so make sure not to free skb to early. * 2. This may run in parallel start_xmit because the it only * looks at the tail of the queue of FIFO (tx_cons), not * the head (tx_prod) */ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) { struct net_device *dev = sky2->netdev; u16 idx; unsigned int bytes_compl = 0, pkts_compl = 0; BUG_ON(done >= sky2->tx_ring_size); for (idx = sky2->tx_cons; idx != done; idx = RING_NEXT(idx, sky2->tx_ring_size)) { struct tx_ring_info *re = sky2->tx_ring + idx; struct sk_buff *skb = re->skb; sky2_tx_unmap(sky2->hw->pdev, re); if (skb) { netif_printk(sky2, tx_done, KERN_DEBUG, dev, "tx done %u\n", idx); pkts_compl++; bytes_compl += skb->len; re->skb = NULL; dev_kfree_skb_any(skb); sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size); } } sky2->tx_cons = idx; smp_mb(); netdev_completed_queue(dev, pkts_compl, bytes_compl); u64_stats_update_begin(&sky2->tx_stats.syncp); sky2->tx_stats.packets += pkts_compl; sky2->tx_stats.bytes += bytes_compl; u64_stats_update_end(&sky2->tx_stats.syncp); } static void sky2_tx_reset(struct sky2_hw *hw, unsigned port) { /* Disable Force Sync bit and Enable Alloc bit */ sky2_write8(hw, SK_REG(port, TXA_CTRL), TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); /* Stop Interval Timer and Limit Counter of Tx Arbiter */ sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L); sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L); /* Reset the PCI FIFO of the async Tx queue */ sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_RST_SET | BMU_FIFO_RST); /* Reset the Tx prefetch units */ sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL), PREF_UNIT_RST_SET); sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET); sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); sky2_read32(hw, B0_CTST); } static void sky2_hw_down(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; u16 ctrl; /* Force flow control off */ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); /* Stop transmitter */ sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), 
BMU_STOP); sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR)); sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET | RB_DIS_OP_MD); ctrl = gma_read16(hw, port, GM_GP_CTRL); ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA); gma_write16(hw, port, GM_GP_CTRL, ctrl); sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); /* Workaround shared GMAC reset */ if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 0 && hw->dev[1] && netif_running(hw->dev[1]))) sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); /* Force any delayed status interrupt and NAPI */ sky2_write32(hw, STAT_LEV_TIMER_CNT, 0); sky2_write32(hw, STAT_TX_TIMER_CNT, 0); sky2_write32(hw, STAT_ISR_TIMER_CNT, 0); sky2_read8(hw, STAT_ISR_TIMER_CTRL); sky2_rx_stop(sky2); spin_lock_bh(&sky2->phy_lock); sky2_phy_power_down(hw, port); spin_unlock_bh(&sky2->phy_lock); sky2_tx_reset(hw, port); /* Free any pending frames stuck in HW queue */ sky2_tx_complete(sky2, sky2->tx_prod); } /* Network shutdown */ static int sky2_close(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; /* Never really got started! */ if (!sky2->tx_le) return 0; netif_info(sky2, ifdown, dev, "disabling interface\n"); if (hw->ports == 1) { sky2_write32(hw, B0_IMSK, 0); sky2_read32(hw, B0_IMSK); napi_disable(&hw->napi); free_irq(hw->pdev->irq, hw); hw->flags &= ~SKY2_HW_IRQ_SETUP; } else { u32 imask; /* Disable port IRQ */ imask = sky2_read32(hw, B0_IMSK); imask &= ~portirq_msk[sky2->port]; sky2_write32(hw, B0_IMSK, imask); sky2_read32(hw, B0_IMSK); synchronize_irq(hw->pdev->irq); napi_synchronize(&hw->napi); } sky2_hw_down(sky2); sky2_free_buffers(sky2); return 0; } static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux) { if (hw->flags & SKY2_HW_FIBRE_PHY) return SPEED_1000; if (!(hw->flags & SKY2_HW_GIGABIT)) { if (aux & PHY_M_PS_SPEED_100) return SPEED_100; else return SPEED_10; } switch (aux & PHY_M_PS_SPEED_MSK) { case PHY_M_PS_SPEED_1000: return SPEED_1000; case PHY_M_PS_SPEED_100: return SPEED_100; default: return SPEED_10; } } static void sky2_link_up(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; static const char *fc_name[] = { [FC_NONE] = "none", [FC_TX] = "tx", [FC_RX] = "rx", [FC_BOTH] = "both", }; sky2_set_ipg(sky2); sky2_enable_rx_tx(sky2); gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); netif_carrier_on(sky2->netdev); mod_timer(&hw->watchdog_timer, jiffies + 1); /* Turn on link LED */ sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF); netif_info(sky2, link, sky2->netdev, "Link is up at %d Mbps, %s duplex, flow control %s\n", sky2->speed, sky2->duplex == DUPLEX_FULL ? "full" : "half", fc_name[sky2->flow_status]); } static void sky2_link_down(struct sky2_port *sky2) { struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; u16 reg; gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0); reg = gma_read16(hw, port, GM_GP_CTRL); reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); gma_write16(hw, port, GM_GP_CTRL, reg); netif_carrier_off(sky2->netdev); /* Turn off link LED */ sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF); netif_info(sky2, link, sky2->netdev, "Link is down\n"); sky2_phy_init(hw, port); } static enum flow_control sky2_flow(int rx, int tx) { if (rx) return tx ? FC_BOTH : FC_RX; else return tx ? 
FC_TX : FC_NONE; } static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux) { struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; u16 advert, lpa; advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV); lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP); if (lpa & PHY_M_AN_RF) { netdev_err(sky2->netdev, "remote fault\n"); return -1; } if (!(aux & PHY_M_PS_SPDUP_RES)) { netdev_err(sky2->netdev, "speed/duplex mismatch\n"); return -1; } sky2->speed = sky2_phy_speed(hw, aux); sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; /* Since the pause result bits seem to in different positions on * different chips. look at registers. */ if (hw->flags & SKY2_HW_FIBRE_PHY) { /* Shift for bits in fiber PHY */ advert &= ~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM); lpa &= ~(LPA_PAUSE_CAP|LPA_PAUSE_ASYM); if (advert & ADVERTISE_1000XPAUSE) advert |= ADVERTISE_PAUSE_CAP; if (advert & ADVERTISE_1000XPSE_ASYM) advert |= ADVERTISE_PAUSE_ASYM; if (lpa & LPA_1000XPAUSE) lpa |= LPA_PAUSE_CAP; if (lpa & LPA_1000XPAUSE_ASYM) lpa |= LPA_PAUSE_ASYM; } sky2->flow_status = FC_NONE; if (advert & ADVERTISE_PAUSE_CAP) { if (lpa & LPA_PAUSE_CAP) sky2->flow_status = FC_BOTH; else if (advert & ADVERTISE_PAUSE_ASYM) sky2->flow_status = FC_RX; } else if (advert & ADVERTISE_PAUSE_ASYM) { if ((lpa & LPA_PAUSE_CAP) && (lpa & LPA_PAUSE_ASYM)) sky2->flow_status = FC_TX; } if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000 && !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX)) sky2->flow_status = FC_NONE; if (sky2->flow_status & FC_TX) sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON); else sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); return 0; } /* Interrupt from PHY */ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port) { struct net_device *dev = hw->dev[port]; struct sky2_port *sky2 = netdev_priv(dev); u16 istatus, phystat; if (!netif_running(dev)) return; spin_lock(&sky2->phy_lock); istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT); phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT); netif_info(sky2, intr, sky2->netdev, "phy interrupt status 0x%x 0x%x\n", istatus, phystat); if (istatus & PHY_M_IS_AN_COMPL) { if (sky2_autoneg_done(sky2, phystat) == 0 && !netif_carrier_ok(dev)) sky2_link_up(sky2); goto out; } if (istatus & PHY_M_IS_LSP_CHANGE) sky2->speed = sky2_phy_speed(hw, phystat); if (istatus & PHY_M_IS_DUP_CHANGE) sky2->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; if (istatus & PHY_M_IS_LST_CHANGE) { if (phystat & PHY_M_PS_LINK_UP) sky2_link_up(sky2); else sky2_link_down(sky2); } out: spin_unlock(&sky2->phy_lock); } /* Special quick link interrupt (Yukon-2 Optima only) */ static void sky2_qlink_intr(struct sky2_hw *hw) { struct sky2_port *sky2 = netdev_priv(hw->dev[0]); u32 imask; u16 phy; /* disable irq */ imask = sky2_read32(hw, B0_IMSK); imask &= ~Y2_IS_PHY_QLNK; sky2_write32(hw, B0_IMSK, imask); /* reset PHY Link Detect */ phy = sky2_pci_read16(hw, PSM_CONFIG_REG4); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); sky2_link_up(sky2); } /* Transmit timeout is only called if we are running, carrier is up * and tx queue is full (stopped). */ static void sky2_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; netif_err(sky2, timer, dev, "tx timeout\n"); netdev_printk(KERN_DEBUG, dev, "transmit ring %u .. 
%u report=%u done=%u\n", sky2->tx_cons, sky2->tx_prod, sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX), sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE))); /* can't restart safely under softirq */ schedule_work(&hw->restart_work); } static int sky2_change_mtu(struct net_device *dev, int new_mtu) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; int err; u16 ctl, mode; u32 imask; if (!netif_running(dev)) { dev->mtu = new_mtu; netdev_update_features(dev); return 0; } imask = sky2_read32(hw, B0_IMSK); sky2_write32(hw, B0_IMSK, 0); sky2_read32(hw, B0_IMSK); netif_trans_update(dev); /* prevent tx timeout */ napi_disable(&hw->napi); netif_tx_disable(dev); synchronize_irq(hw->pdev->irq); if (!(hw->flags & SKY2_HW_RAM_BUFFER)) sky2_set_tx_stfwd(hw, port); ctl = gma_read16(hw, port, GM_GP_CTRL); gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA); sky2_rx_stop(sky2); sky2_rx_clean(sky2); dev->mtu = new_mtu; netdev_update_features(dev); mode = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA; if (sky2->speed > SPEED_100) mode |= IPG_DATA_VAL(IPG_DATA_DEF_1000); else mode |= IPG_DATA_VAL(IPG_DATA_DEF_10_100); if (dev->mtu > ETH_DATA_LEN) mode |= GM_SMOD_JUMBO_ENA; gma_write16(hw, port, GM_SERIAL_MODE, mode); sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD); err = sky2_alloc_rx_skbs(sky2); if (!err) sky2_rx_start(sky2); else sky2_rx_clean(sky2); sky2_write32(hw, B0_IMSK, imask); sky2_read32(hw, B0_Y2_SP_LISR); napi_enable(&hw->napi); if (err) dev_close(dev); else { gma_write16(hw, port, GM_GP_CTRL, ctl); netif_wake_queue(dev); } return err; } static inline bool needs_copy(const struct rx_ring_info *re, unsigned length) { #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS /* Some architectures need the IP header to be aligned */ if (!IS_ALIGNED(re->data_addr + ETH_HLEN, sizeof(u32))) return true; #endif return length < copybreak; } /* For small just reuse existing skb for next receive */ static struct sk_buff *receive_copy(struct sky2_port *sky2, const struct rx_ring_info *re, unsigned length) { struct sk_buff *skb; skb = netdev_alloc_skb_ip_align(sky2->netdev, length); if (likely(skb)) { dma_sync_single_for_cpu(&sky2->hw->pdev->dev, re->data_addr, length, DMA_FROM_DEVICE); skb_copy_from_linear_data(re->skb, skb->data, length); skb->ip_summed = re->skb->ip_summed; skb->csum = re->skb->csum; skb_copy_hash(skb, re->skb); __vlan_hwaccel_copy_tag(skb, re->skb); dma_sync_single_for_device(&sky2->hw->pdev->dev, re->data_addr, length, DMA_FROM_DEVICE); __vlan_hwaccel_clear_tag(re->skb); skb_clear_hash(re->skb); re->skb->ip_summed = CHECKSUM_NONE; skb_put(skb, length); } return skb; } /* Adjust length of skb with fragments to match received data */ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space, unsigned int length) { int i, num_frags; unsigned int size; /* put header into skb */ size = min(length, hdr_space); skb->tail += size; skb->len += size; length -= size; num_frags = skb_shinfo(skb)->nr_frags; for (i = 0; i < num_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; if (length == 0) { /* don't need this page */ __skb_frag_unref(frag, false); --skb_shinfo(skb)->nr_frags; } else { size = min(length, (unsigned) PAGE_SIZE); skb_frag_size_set(frag, size); skb->data_len += size; skb->truesize += PAGE_SIZE; skb->len += size; length -= size; } } } /* Normal packet - take skb from ring element and put in a new one */ static struct sk_buff *receive_new(struct sky2_port *sky2, struct 
rx_ring_info *re, unsigned int length) { struct sk_buff *skb; struct rx_ring_info nre; unsigned hdr_space = sky2->rx_data_size; nre.skb = sky2_rx_alloc(sky2, GFP_ATOMIC); if (unlikely(!nre.skb)) goto nobuf; if (sky2_rx_map_skb(sky2->hw->pdev, &nre, hdr_space)) goto nomap; skb = re->skb; sky2_rx_unmap_skb(sky2->hw->pdev, re); prefetch(skb->data); *re = nre; if (skb_shinfo(skb)->nr_frags) skb_put_frags(skb, hdr_space, length); else skb_put(skb, length); return skb; nomap: dev_kfree_skb(nre.skb); nobuf: return NULL; } /* * Receive one packet. * For larger packets, get new buffer. */ static struct sk_buff *sky2_receive(struct net_device *dev, u16 length, u32 status) { struct sky2_port *sky2 = netdev_priv(dev); struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next; struct sk_buff *skb = NULL; u16 count = (status & GMR_FS_LEN) >> 16; netif_printk(sky2, rx_status, KERN_DEBUG, dev, "rx slot %u status 0x%x len %d\n", sky2->rx_next, status, length); sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; prefetch(sky2->rx_ring + sky2->rx_next); if (skb_vlan_tag_present(re->skb)) count -= VLAN_HLEN; /* Account for vlan tag */ /* This chip has hardware problems that generates bogus status. * So do only marginal checking and expect higher level protocols * to handle crap frames. */ if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P && sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 && length != count) goto okay; if (status & GMR_FS_ANY_ERR) goto error; if (!(status & GMR_FS_RX_OK)) goto resubmit; /* if length reported by DMA does not match PHY, packet was truncated */ if (length != count) goto error; okay: if (needs_copy(re, length)) skb = receive_copy(sky2, re, length); else skb = receive_new(sky2, re, length); dev->stats.rx_dropped += (skb == NULL); resubmit: sky2_rx_submit(sky2, re); return skb; error: ++dev->stats.rx_errors; if (net_ratelimit()) netif_info(sky2, rx_err, dev, "rx error, status 0x%x length %d\n", status, length); goto resubmit; } /* Transmit complete */ static inline void sky2_tx_done(struct net_device *dev, u16 last) { struct sky2_port *sky2 = netdev_priv(dev); if (netif_running(dev)) { sky2_tx_complete(sky2, last); /* Wake unless it's detached, and called e.g. from sky2_close() */ if (tx_avail(sky2) > MAX_SKB_TX_LE + 4) netif_wake_queue(dev); } } static inline void sky2_skb_rx(const struct sky2_port *sky2, struct sk_buff *skb) { if (skb->ip_summed == CHECKSUM_NONE) netif_receive_skb(skb); else napi_gro_receive(&sky2->hw->napi, skb); } static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port, unsigned packets, unsigned bytes) { struct net_device *dev = hw->dev[port]; struct sky2_port *sky2 = netdev_priv(dev); if (packets == 0) return; u64_stats_update_begin(&sky2->rx_stats.syncp); sky2->rx_stats.packets += packets; sky2->rx_stats.bytes += bytes; u64_stats_update_end(&sky2->rx_stats.syncp); sky2->last_rx = jiffies; sky2_rx_update(netdev_priv(dev), rxqaddr[port]); } static void sky2_rx_checksum(struct sky2_port *sky2, u32 status) { /* If this happens then driver assuming wrong format for chip type */ BUG_ON(sky2->hw->flags & SKY2_HW_NEW_LE); /* Both checksum counters are programmed to start at * the same offset, so unless there is a problem they * should match. This failure is an early indication that * hardware receive checksumming won't work. 
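* When the two halves of the status word disagree, the else branch turns receive checksum offload off entirely.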
*/ if (likely((u16)(status >> 16) == (u16)status)) { struct sk_buff *skb = sky2->rx_ring[sky2->rx_next].skb; skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = le16_to_cpu(status); } else { dev_notice(&sky2->hw->pdev->dev, "%s: receive checksum problem (status = %#x)\n", sky2->netdev->name, status); /* Disable checksum offload * It will be reenabled on next ndo_set_features, but if it's * really broken, will get disabled again */ sky2->netdev->features &= ~NETIF_F_RXCSUM; sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), BMU_DIS_RX_CHKSUM); } } static void sky2_rx_tag(struct sky2_port *sky2, u16 length) { struct sk_buff *skb; skb = sky2->rx_ring[sky2->rx_next].skb; __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(length)); } static void sky2_rx_hash(struct sky2_port *sky2, u32 status) { struct sk_buff *skb; skb = sky2->rx_ring[sky2->rx_next].skb; skb_set_hash(skb, le32_to_cpu(status), PKT_HASH_TYPE_L3); } /* Process status response ring */ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) { int work_done = 0; unsigned int total_bytes[2] = { 0 }; unsigned int total_packets[2] = { 0 }; if (to_do <= 0) return work_done; rmb(); do { struct sky2_port *sky2; struct sky2_status_le *le = hw->st_le + hw->st_idx; unsigned port; struct net_device *dev; struct sk_buff *skb; u32 status; u16 length; u8 opcode = le->opcode; if (!(opcode & HW_OWNER)) break; hw->st_idx = RING_NEXT(hw->st_idx, hw->st_size); port = le->css & CSS_LINK_BIT; dev = hw->dev[port]; sky2 = netdev_priv(dev); length = le16_to_cpu(le->length); status = le32_to_cpu(le->status); le->opcode = 0; switch (opcode & ~HW_OWNER) { case OP_RXSTAT: total_packets[port]++; total_bytes[port] += length; skb = sky2_receive(dev, length, status); if (!skb) break; /* This chip reports checksum status differently */ if (hw->flags & SKY2_HW_NEW_LE) { if ((dev->features & NETIF_F_RXCSUM) && (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) && (le->css & CSS_TCPUDPCSOK)) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; } skb->protocol = eth_type_trans(skb, dev); sky2_skb_rx(sky2, skb); /* Stop after net poll weight */ if (++work_done >= to_do) goto exit_loop; break; case OP_RXVLAN: sky2_rx_tag(sky2, length); break; case OP_RXCHKSVLAN: sky2_rx_tag(sky2, length); fallthrough; case OP_RXCHKS: if (likely(dev->features & NETIF_F_RXCSUM)) sky2_rx_checksum(sky2, status); break; case OP_RSS_HASH: sky2_rx_hash(sky2, status); break; case OP_TXINDEXLE: /* TX index reports status for both ports */ sky2_tx_done(hw->dev[0], status & 0xfff); if (hw->dev[1]) sky2_tx_done(hw->dev[1], ((status >> 24) & 0xff) | (u16)(length & 0xf) << 8); break; default: if (net_ratelimit()) pr_warn("unknown status opcode 0x%x\n", opcode); } } while (hw->st_idx != idx); /* Fully processed status ring so clear irq */ sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); exit_loop: sky2_rx_done(hw, 0, total_packets[0], total_bytes[0]); sky2_rx_done(hw, 1, total_packets[1], total_bytes[1]); return work_done; } static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status) { struct net_device *dev = hw->dev[port]; if (net_ratelimit()) netdev_info(dev, "hw error interrupt status 0x%x\n", status); if (status & Y2_IS_PAR_RD1) { if (net_ratelimit()) netdev_err(dev, "ram data read parity error\n"); /* Clear IRQ */ sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR); } if (status & Y2_IS_PAR_WR1) { if (net_ratelimit()) netdev_err(dev, "ram data write parity error\n"); sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR); } if 
(status & Y2_IS_PAR_MAC1) { if (net_ratelimit()) netdev_err(dev, "MAC parity error\n"); sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE); } if (status & Y2_IS_PAR_RX1) { if (net_ratelimit()) netdev_err(dev, "RX parity error\n"); sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR); } if (status & Y2_IS_TCP_TXA1) { if (net_ratelimit()) netdev_err(dev, "TCP segmentation error\n"); sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP); } } static void sky2_hw_intr(struct sky2_hw *hw) { struct pci_dev *pdev = hw->pdev; u32 status = sky2_read32(hw, B0_HWE_ISRC); u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK); status &= hwmsk; if (status & Y2_IS_TIST_OV) sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { u16 pci_err; sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); pci_err = sky2_pci_read16(hw, PCI_STATUS); if (net_ratelimit()) dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", pci_err); sky2_pci_write16(hw, PCI_STATUS, pci_err | PCI_STATUS_ERROR_BITS); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); } if (status & Y2_IS_PCI_EXP) { /* PCI-Express uncorrectable Error occurred */ u32 err; sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, 0xfffffffful); if (net_ratelimit()) dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); } if (status & Y2_HWE_L1_MASK) sky2_hw_error(hw, 0, status); status >>= 8; if (status & Y2_HWE_L1_MASK) sky2_hw_error(hw, 1, status); } static void sky2_mac_intr(struct sky2_hw *hw, unsigned port) { struct net_device *dev = hw->dev[port]; struct sky2_port *sky2 = netdev_priv(dev); u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC)); netif_info(sky2, intr, dev, "mac interrupt status 0x%x\n", status); if (status & GM_IS_RX_CO_OV) gma_read16(hw, port, GM_RX_IRQ_SRC); if (status & GM_IS_TX_CO_OV) gma_read16(hw, port, GM_TX_IRQ_SRC); if (status & GM_IS_RX_FF_OR) { ++dev->stats.rx_fifo_errors; sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); } if (status & GM_IS_TX_FF_UR) { ++dev->stats.tx_fifo_errors; sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); } } /* This should never happen it is a bug. 
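The handler logs the queue's prefetch get/put indices and clears the descriptor check interrupt.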
*/ static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q) { struct net_device *dev = hw->dev[port]; u16 idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX)); dev_err(&hw->pdev->dev, "%s: descriptor error q=%#x get=%u put=%u\n", dev->name, (unsigned) q, (unsigned) idx, (unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX))); sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK); } static int sky2_rx_hung(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; unsigned rxq = rxqaddr[port]; u32 mac_rp = sky2_read32(hw, SK_REG(port, RX_GMF_RP)); u8 mac_lev = sky2_read8(hw, SK_REG(port, RX_GMF_RLEV)); u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP)); u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL)); /* If idle and MAC or PCI is stuck */ if (sky2->check.last == sky2->last_rx && ((mac_rp == sky2->check.mac_rp && mac_lev != 0 && mac_lev >= sky2->check.mac_lev) || /* Check if the PCI RX hang */ (fifo_rp == sky2->check.fifo_rp && fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) { netdev_printk(KERN_DEBUG, dev, "hung mac %d:%d fifo %d (%d:%d)\n", mac_lev, mac_rp, fifo_lev, fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP))); return 1; } else { sky2->check.last = sky2->last_rx; sky2->check.mac_rp = mac_rp; sky2->check.mac_lev = mac_lev; sky2->check.fifo_rp = fifo_rp; sky2->check.fifo_lev = fifo_lev; return 0; } } static void sky2_watchdog(struct timer_list *t) { struct sky2_hw *hw = from_timer(hw, t, watchdog_timer); /* Check for lost IRQ once a second */ if (sky2_read32(hw, B0_ISRC)) { napi_schedule(&hw->napi); } else { int i, active = 0; for (i = 0; i < hw->ports; i++) { struct net_device *dev = hw->dev[i]; if (!netif_running(dev)) continue; ++active; /* For chips with Rx FIFO, check if stuck */ if ((hw->flags & SKY2_HW_RAM_BUFFER) && sky2_rx_hung(dev)) { netdev_info(dev, "receiver hang detected\n"); schedule_work(&hw->restart_work); return; } } if (active == 0) return; } mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ)); } /* Hardware/software error handling */ static void sky2_err_intr(struct sky2_hw *hw, u32 status) { if (net_ratelimit()) dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status); if (status & Y2_IS_HW_ERR) sky2_hw_intr(hw); if (status & Y2_IS_IRQ_MAC1) sky2_mac_intr(hw, 0); if (status & Y2_IS_IRQ_MAC2) sky2_mac_intr(hw, 1); if (status & Y2_IS_CHK_RX1) sky2_le_error(hw, 0, Q_R1); if (status & Y2_IS_CHK_RX2) sky2_le_error(hw, 1, Q_R2); if (status & Y2_IS_CHK_TXA1) sky2_le_error(hw, 0, Q_XA1); if (status & Y2_IS_CHK_TXA2) sky2_le_error(hw, 1, Q_XA2); } static int sky2_poll(struct napi_struct *napi, int work_limit) { struct sky2_hw *hw = container_of(napi, struct sky2_hw, napi); u32 status = sky2_read32(hw, B0_Y2_SP_EISR); int work_done = 0; u16 idx; if (unlikely(status & Y2_IS_ERROR)) sky2_err_intr(hw, status); if (status & Y2_IS_IRQ_PHY1) sky2_phy_intr(hw, 0); if (status & Y2_IS_IRQ_PHY2) sky2_phy_intr(hw, 1); if (status & Y2_IS_PHY_QLNK) sky2_qlink_intr(hw); while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) { work_done += sky2_status_intr(hw, work_limit - work_done, idx); if (work_done >= work_limit) goto done; } napi_complete_done(napi, work_done); sky2_read32(hw, B0_Y2_SP_LISR); done: return work_done; } static irqreturn_t sky2_intr(int irq, void *dev_id) { struct sky2_hw *hw = dev_id; u32 status; /* Reading this mask interrupts as side effect */ status = sky2_read32(hw, B0_Y2_SP_ISRC2); if (status == 0 || status == ~0) { sky2_write32(hw, B0_Y2_SP_ICR, 2); return IRQ_NONE; 
} prefetch(&hw->st_le[hw->st_idx]); napi_schedule(&hw->napi); return IRQ_HANDLED; } #ifdef CONFIG_NET_POLL_CONTROLLER static void sky2_netpoll(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); napi_schedule(&sky2->hw->napi); } #endif /* Chip internal frequency for clock calculations */ static u32 sky2_mhz(const struct sky2_hw *hw) { switch (hw->chip_id) { case CHIP_ID_YUKON_EC: case CHIP_ID_YUKON_EC_U: case CHIP_ID_YUKON_EX: case CHIP_ID_YUKON_SUPR: case CHIP_ID_YUKON_UL_2: case CHIP_ID_YUKON_OPT: case CHIP_ID_YUKON_PRM: case CHIP_ID_YUKON_OP_2: return 125; case CHIP_ID_YUKON_FE: return 100; case CHIP_ID_YUKON_FE_P: return 50; case CHIP_ID_YUKON_XL: return 156; default: BUG(); } } static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us) { return sky2_mhz(hw) * us; } static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk) { return clk / sky2_mhz(hw); } static int sky2_init(struct sky2_hw *hw) { u8 t8; /* Enable all clocks and check for bad PCI access */ sky2_pci_write32(hw, PCI_DEV_REG3, 0); sky2_write8(hw, B0_CTST, CS_RST_CLR); hw->chip_id = sky2_read8(hw, B2_CHIP_ID); hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4; switch (hw->chip_id) { case CHIP_ID_YUKON_XL: hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY; if (hw->chip_rev < CHIP_REV_YU_XL_A2) hw->flags |= SKY2_HW_RSS_BROKEN; break; case CHIP_ID_YUKON_EC_U: hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY | SKY2_HW_ADV_POWER_CTL; break; case CHIP_ID_YUKON_EX: hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY | SKY2_HW_NEW_LE | SKY2_HW_ADV_POWER_CTL | SKY2_HW_RSS_CHKSUM; /* New transmit checksum */ if (hw->chip_rev != CHIP_REV_YU_EX_B0) hw->flags |= SKY2_HW_AUTO_TX_SUM; break; case CHIP_ID_YUKON_EC: /* This rev is really old, and requires untested workarounds */ if (hw->chip_rev == CHIP_REV_YU_EC_A1) { dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n"); return -EOPNOTSUPP; } hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RSS_BROKEN; break; case CHIP_ID_YUKON_FE: hw->flags = SKY2_HW_RSS_BROKEN; break; case CHIP_ID_YUKON_FE_P: hw->flags = SKY2_HW_NEWER_PHY | SKY2_HW_NEW_LE | SKY2_HW_AUTO_TX_SUM | SKY2_HW_ADV_POWER_CTL; /* The workaround for status conflicts VLAN tag detection. 
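On the FE+ A0 revision this means setting SKY2_HW_VLAN_BROKEN together with SKY2_HW_RSS_CHKSUM below.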
*/ if (hw->chip_rev == CHIP_REV_YU_FE2_A0) hw->flags |= SKY2_HW_VLAN_BROKEN | SKY2_HW_RSS_CHKSUM; break; case CHIP_ID_YUKON_SUPR: hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY | SKY2_HW_NEW_LE | SKY2_HW_AUTO_TX_SUM | SKY2_HW_ADV_POWER_CTL; if (hw->chip_rev == CHIP_REV_YU_SU_A0) hw->flags |= SKY2_HW_RSS_CHKSUM; break; case CHIP_ID_YUKON_UL_2: hw->flags = SKY2_HW_GIGABIT | SKY2_HW_ADV_POWER_CTL; break; case CHIP_ID_YUKON_OPT: case CHIP_ID_YUKON_PRM: case CHIP_ID_YUKON_OP_2: hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEW_LE | SKY2_HW_ADV_POWER_CTL; break; default: dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n", hw->chip_id); return -EOPNOTSUPP; } hw->pmd_type = sky2_read8(hw, B2_PMD_TYP); if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P') hw->flags |= SKY2_HW_FIBRE_PHY; hw->ports = 1; t8 = sky2_read8(hw, B2_Y2_HW_RES); if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) { if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC)) ++hw->ports; } if (sky2_read8(hw, B2_E_0)) hw->flags |= SKY2_HW_RAM_BUFFER; return 0; } static void sky2_reset(struct sky2_hw *hw) { struct pci_dev *pdev = hw->pdev; u16 status; int i; u32 hwe_mask = Y2_HWE_ALL_MASK; /* disable ASF */ if (hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_SUPR) { sky2_write32(hw, CPU_WDOG, 0); status = sky2_read16(hw, HCU_CCSR); status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE | HCU_CCSR_UC_STATE_MSK); /* * CPU clock divider shouldn't be used because * - ASF firmware may malfunction * - Yukon-Supreme: Parallel FLASH doesn't support divided clocks */ status &= ~HCU_CCSR_CPU_CLK_DIVIDE_MSK; sky2_write16(hw, HCU_CCSR, status); sky2_write32(hw, CPU_WDOG, 0); } else sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE); /* do a SW reset */ sky2_write8(hw, B0_CTST, CS_RST_SET); sky2_write8(hw, B0_CTST, CS_RST_CLR); /* allow writes to PCI config */ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); /* clear PCI errors, if any */ status = sky2_pci_read16(hw, PCI_STATUS); status |= PCI_STATUS_ERROR_BITS; sky2_pci_write16(hw, PCI_STATUS, status); sky2_write8(hw, B0_CTST, CS_MRST_CLR); if (pci_is_pcie(pdev)) { sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, 0xfffffffful); /* If error bit is stuck on ignore it */ if (sky2_read32(hw, B0_HWE_ISRC) & Y2_IS_PCI_EXP) dev_info(&pdev->dev, "ignoring stuck error report bit\n"); else hwe_mask |= Y2_IS_PCI_EXP; } sky2_power_on(hw); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); for (i = 0; i < hw->ports; i++) { sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); if (hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_SUPR) sky2_write16(hw, SK_REG(i, GMAC_CTRL), GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON | GMC_BYP_RETR_ON); } if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev > CHIP_REV_YU_SU_B0) { /* enable MACSec clock gating */ sky2_pci_write32(hw, PCI_DEV_REG3, P_CLK_MACSEC_DIS); } if (hw->chip_id == CHIP_ID_YUKON_OPT || hw->chip_id == CHIP_ID_YUKON_PRM || hw->chip_id == CHIP_ID_YUKON_OP_2) { u16 reg; if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) { /* disable PCI-E PHY power down (set PHY reg 0x80, bit 7 */ sky2_write32(hw, Y2_PEX_PHY_DATA, (0x80UL << 16) | (1 << 7)); /* set PHY Link Detect Timer to 1.1 second (11x 100ms) */ reg = 10; /* re-enable PEX PM in PEX PHY debug reg. 
8 (clear bit 12) */ sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16)); } else { /* set PHY Link Detect Timer to 0.4 second (4x 100ms) */ reg = 3; } reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE; reg |= PSM_CONFIG_REG4_RST_PHY_LINK_DETECT; /* reset PHY Link Detect */ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); sky2_pci_write16(hw, PSM_CONFIG_REG4, reg); /* check if PSMv2 was running before */ reg = sky2_pci_read16(hw, PSM_CONFIG_REG3); if (reg & PCI_EXP_LNKCTL_ASPMC) /* restore the PCIe Link Control register */ sky2_pci_write16(hw, pdev->pcie_cap + PCI_EXP_LNKCTL, reg); if (hw->chip_id == CHIP_ID_YUKON_PRM && hw->chip_rev == CHIP_REV_YU_PRM_A0) { /* change PHY Interrupt polarity to low active */ reg = sky2_read16(hw, GPHY_CTRL); sky2_write16(hw, GPHY_CTRL, reg | GPC_INTPOL); /* adapt HW for low active PHY Interrupt */ reg = sky2_read16(hw, Y2_CFG_SPC + PCI_LDO_CTRL); sky2_write16(hw, Y2_CFG_SPC + PCI_LDO_CTRL, reg | PHY_M_UNDOC1); } sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */ sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16)); } /* Clear I2C IRQ noise */ sky2_write32(hw, B2_I2C_IRQ, 1); /* turn off hardware timer (unused) */ sky2_write8(hw, B2_TI_CTRL, TIM_STOP); sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ); /* Turn off descriptor polling */ sky2_write32(hw, B28_DPT_CTRL, DPT_STOP); /* Turn off receive timestamp */ sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP); sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); /* enable the Tx Arbiters */ for (i = 0; i < hw->ports; i++) sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB); /* Initialize ram interface */ for (i = 0; i < hw->ports; i++) { sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR); sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53); sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53); } sky2_write32(hw, B0_HWE_IMSK, hwe_mask); for (i = 0; i < hw->ports; i++) sky2_gmac_reset(hw, i); memset(hw->st_le, 0, hw->st_size * sizeof(struct sky2_status_le)); hw->st_idx = 0; sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET); sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR); sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma); sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32); /* Set the list last index */ sky2_write16(hw, STAT_LAST_IDX, hw->st_size - 1); sky2_write16(hw, STAT_TX_IDX_TH, 10); sky2_write8(hw, STAT_FIFO_WM, 16); /* set Status-FIFO ISR watermark */ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0) sky2_write8(hw, STAT_FIFO_ISR_WM, 4); else sky2_write8(hw, STAT_FIFO_ISR_WM, 16); sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000)); sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20)); sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100)); /* enable status unit */ sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON); sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START); 
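/* ISR timer was programmed to 20 us above and is started last */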
sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START); } /* Take device down (offline). * Equivalent to doing dev_stop() but this does not * inform upper layers of the transition. */ static void sky2_detach(struct net_device *dev) { if (netif_running(dev)) { netif_tx_lock(dev); netif_device_detach(dev); /* stop txq */ netif_tx_unlock(dev); sky2_close(dev); } } /* Bring device back after doing sky2_detach */ static int sky2_reattach(struct net_device *dev) { int err = 0; if (netif_running(dev)) { err = sky2_open(dev); if (err) { netdev_info(dev, "could not restart %d\n", err); dev_close(dev); } else { netif_device_attach(dev); sky2_set_multicast(dev); } } return err; } static void sky2_all_down(struct sky2_hw *hw) { int i; if (hw->flags & SKY2_HW_IRQ_SETUP) { sky2_write32(hw, B0_IMSK, 0); sky2_read32(hw, B0_IMSK); synchronize_irq(hw->pdev->irq); napi_disable(&hw->napi); } for (i = 0; i < hw->ports; i++) { struct net_device *dev = hw->dev[i]; struct sky2_port *sky2 = netdev_priv(dev); if (!netif_running(dev)) continue; netif_carrier_off(dev); netif_tx_disable(dev); sky2_hw_down(sky2); } } static void sky2_all_up(struct sky2_hw *hw) { u32 imask = Y2_IS_BASE; int i; for (i = 0; i < hw->ports; i++) { struct net_device *dev = hw->dev[i]; struct sky2_port *sky2 = netdev_priv(dev); if (!netif_running(dev)) continue; sky2_hw_up(sky2); sky2_set_multicast(dev); imask |= portirq_msk[i]; netif_wake_queue(dev); } if (hw->flags & SKY2_HW_IRQ_SETUP) { sky2_write32(hw, B0_IMSK, imask); sky2_read32(hw, B0_IMSK); sky2_read32(hw, B0_Y2_SP_LISR); napi_enable(&hw->napi); } } static void sky2_restart(struct work_struct *work) { struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work); rtnl_lock(); sky2_all_down(hw); sky2_reset(hw); sky2_all_up(hw); rtnl_unlock(); } static inline u8 sky2_wol_supported(const struct sky2_hw *hw) { return sky2_is_copper(hw) ? 
(WAKE_PHY | WAKE_MAGIC) : 0; } static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { const struct sky2_port *sky2 = netdev_priv(dev); wol->supported = sky2_wol_supported(sky2->hw); wol->wolopts = sky2->wol; } static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; bool enable_wakeup = false; int i; if ((wol->wolopts & ~sky2_wol_supported(sky2->hw)) || !device_can_wakeup(&hw->pdev->dev)) return -EOPNOTSUPP; sky2->wol = wol->wolopts; for (i = 0; i < hw->ports; i++) { struct net_device *dev = hw->dev[i]; struct sky2_port *sky2 = netdev_priv(dev); if (sky2->wol) enable_wakeup = true; } device_set_wakeup_enable(&hw->pdev->dev, enable_wakeup); return 0; } static u32 sky2_supported_modes(const struct sky2_hw *hw) { if (sky2_is_copper(hw)) { u32 modes = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full; if (hw->flags & SKY2_HW_GIGABIT) modes |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; return modes; } else return SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; } static int sky2_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; u32 supported, advertising; supported = sky2_supported_modes(hw); cmd->base.phy_address = PHY_ADDR_MARV; if (sky2_is_copper(hw)) { cmd->base.port = PORT_TP; cmd->base.speed = sky2->speed; supported |= SUPPORTED_Autoneg | SUPPORTED_TP; } else { cmd->base.speed = SPEED_1000; cmd->base.port = PORT_FIBRE; supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE; } advertising = sky2->advertising; cmd->base.autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED) ? AUTONEG_ENABLE : AUTONEG_DISABLE; cmd->base.duplex = sky2->duplex; ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); return 0; } static int sky2_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct sky2_port *sky2 = netdev_priv(dev); const struct sky2_hw *hw = sky2->hw; u32 supported = sky2_supported_modes(hw); u32 new_advertising; ethtool_convert_link_mode_to_legacy_u32(&new_advertising, cmd->link_modes.advertising); if (cmd->base.autoneg == AUTONEG_ENABLE) { if (new_advertising & ~supported) return -EINVAL; if (sky2_is_copper(hw)) sky2->advertising = new_advertising | ADVERTISED_TP | ADVERTISED_Autoneg; else sky2->advertising = new_advertising | ADVERTISED_FIBRE | ADVERTISED_Autoneg; sky2->flags |= SKY2_FLAG_AUTO_SPEED; sky2->duplex = -1; sky2->speed = -1; } else { u32 setting; u32 speed = cmd->base.speed; switch (speed) { case SPEED_1000: if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_1000baseT_Full; else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_1000baseT_Half; else return -EINVAL; break; case SPEED_100: if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_100baseT_Full; else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_100baseT_Half; else return -EINVAL; break; case SPEED_10: if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_10baseT_Full; else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_10baseT_Half; else return -EINVAL; break; default: return -EINVAL; } if ((setting & supported) == 0) return -EINVAL; sky2->speed = speed; sky2->duplex = cmd->base.duplex; sky2->flags &= ~SKY2_FLAG_AUTO_SPEED; } if (netif_running(dev)) { 
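/* interface is up: apply the new link settings to the PHY right away and reprogram the RX multicast/pause filter */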
sky2_phy_reinit(sky2); sky2_set_multicast(dev); } return 0; } static void sky2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct sky2_port *sky2 = netdev_priv(dev); strscpy(info->driver, DRV_NAME, sizeof(info->driver)); strscpy(info->version, DRV_VERSION, sizeof(info->version)); strscpy(info->bus_info, pci_name(sky2->hw->pdev), sizeof(info->bus_info)); } static const struct sky2_stat { char name[ETH_GSTRING_LEN]; u16 offset; } sky2_stats[] = { { "tx_bytes", GM_TXO_OK_HI }, { "rx_bytes", GM_RXO_OK_HI }, { "tx_broadcast", GM_TXF_BC_OK }, { "rx_broadcast", GM_RXF_BC_OK }, { "tx_multicast", GM_TXF_MC_OK }, { "rx_multicast", GM_RXF_MC_OK }, { "tx_unicast", GM_TXF_UC_OK }, { "rx_unicast", GM_RXF_UC_OK }, { "tx_mac_pause", GM_TXF_MPAUSE }, { "rx_mac_pause", GM_RXF_MPAUSE }, { "collisions", GM_TXF_COL }, { "late_collision",GM_TXF_LAT_COL }, { "aborted", GM_TXF_ABO_COL }, { "single_collisions", GM_TXF_SNG_COL }, { "multi_collisions", GM_TXF_MUL_COL }, { "rx_short", GM_RXF_SHT }, { "rx_runt", GM_RXE_FRAG }, { "rx_64_byte_packets", GM_RXF_64B }, { "rx_65_to_127_byte_packets", GM_RXF_127B }, { "rx_128_to_255_byte_packets", GM_RXF_255B }, { "rx_256_to_511_byte_packets", GM_RXF_511B }, { "rx_512_to_1023_byte_packets", GM_RXF_1023B }, { "rx_1024_to_1518_byte_packets", GM_RXF_1518B }, { "rx_1518_to_max_byte_packets", GM_RXF_MAX_SZ }, { "rx_too_long", GM_RXF_LNG_ERR }, { "rx_fifo_overflow", GM_RXE_FIFO_OV }, { "rx_jabber", GM_RXF_JAB_PKT }, { "rx_fcs_error", GM_RXF_FCS_ERR }, { "tx_64_byte_packets", GM_TXF_64B }, { "tx_65_to_127_byte_packets", GM_TXF_127B }, { "tx_128_to_255_byte_packets", GM_TXF_255B }, { "tx_256_to_511_byte_packets", GM_TXF_511B }, { "tx_512_to_1023_byte_packets", GM_TXF_1023B }, { "tx_1024_to_1518_byte_packets", GM_TXF_1518B }, { "tx_1519_to_max_byte_packets", GM_TXF_MAX_SZ }, { "tx_fifo_underrun", GM_TXE_FIFO_UR }, }; static u32 sky2_get_msglevel(struct net_device *netdev) { struct sky2_port *sky2 = netdev_priv(netdev); return sky2->msg_enable; } static int sky2_nway_reset(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); if (!netif_running(dev) || !(sky2->flags & SKY2_FLAG_AUTO_SPEED)) return -EINVAL; sky2_phy_reinit(sky2); sky2_set_multicast(dev); return 0; } static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count) { struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; int i; data[0] = get_stats64(hw, port, GM_TXO_OK_LO); data[1] = get_stats64(hw, port, GM_RXO_OK_LO); for (i = 2; i < count; i++) data[i] = get_stats32(hw, port, sky2_stats[i].offset); } static void sky2_set_msglevel(struct net_device *netdev, u32 value) { struct sky2_port *sky2 = netdev_priv(netdev); sky2->msg_enable = value; } static int sky2_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(sky2_stats); default: return -EOPNOTSUPP; } } static void sky2_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 * data) { struct sky2_port *sky2 = netdev_priv(dev); sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats)); } static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data) { int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(sky2_stats); i++) memcpy(data + i * ETH_GSTRING_LEN, sky2_stats[i].name, ETH_GSTRING_LEN); break; } } static int sky2_set_mac_address(struct net_device *dev, void *p) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; const struct sockaddr *addr = p; if 
(!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; eth_hw_addr_set(dev, addr->sa_data); memcpy_toio(hw->regs + B2_MAC_1 + port * 8, dev->dev_addr, ETH_ALEN); memcpy_toio(hw->regs + B2_MAC_2 + port * 8, dev->dev_addr, ETH_ALEN); /* virtual address for data */ gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr); /* physical address: used for pause frames */ gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr); return 0; } static inline void sky2_add_filter(u8 filter[8], const u8 *addr) { u32 bit; bit = ether_crc(ETH_ALEN, addr) & 63; filter[bit >> 3] |= 1 << (bit & 7); } static void sky2_set_multicast(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; struct netdev_hw_addr *ha; u16 reg; u8 filter[8]; int rx_pause; static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 }; rx_pause = (sky2->flow_status == FC_RX || sky2->flow_status == FC_BOTH); memset(filter, 0, sizeof(filter)); reg = gma_read16(hw, port, GM_RX_CTRL); reg |= GM_RXCR_UCF_ENA; if (dev->flags & IFF_PROMISC) /* promiscuous */ reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); else if (dev->flags & IFF_ALLMULTI) memset(filter, 0xff, sizeof(filter)); else if (netdev_mc_empty(dev) && !rx_pause) reg &= ~GM_RXCR_MCF_ENA; else { reg |= GM_RXCR_MCF_ENA; if (rx_pause) sky2_add_filter(filter, pause_mc_addr); netdev_for_each_mc_addr(ha, dev) sky2_add_filter(filter, ha->addr); } gma_write16(hw, port, GM_MC_ADDR_H1, (u16) filter[0] | ((u16) filter[1] << 8)); gma_write16(hw, port, GM_MC_ADDR_H2, (u16) filter[2] | ((u16) filter[3] << 8)); gma_write16(hw, port, GM_MC_ADDR_H3, (u16) filter[4] | ((u16) filter[5] << 8)); gma_write16(hw, port, GM_MC_ADDR_H4, (u16) filter[6] | ((u16) filter[7] << 8)); gma_write16(hw, port, GM_RX_CTRL, reg); } static void sky2_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; unsigned int start; u64 _bytes, _packets; do { start = u64_stats_fetch_begin(&sky2->rx_stats.syncp); _bytes = sky2->rx_stats.bytes; _packets = sky2->rx_stats.packets; } while (u64_stats_fetch_retry(&sky2->rx_stats.syncp, start)); stats->rx_packets = _packets; stats->rx_bytes = _bytes; do { start = u64_stats_fetch_begin(&sky2->tx_stats.syncp); _bytes = sky2->tx_stats.bytes; _packets = sky2->tx_stats.packets; } while (u64_stats_fetch_retry(&sky2->tx_stats.syncp, start)); stats->tx_packets = _packets; stats->tx_bytes = _bytes; stats->multicast = get_stats32(hw, port, GM_RXF_MC_OK) + get_stats32(hw, port, GM_RXF_BC_OK); stats->collisions = get_stats32(hw, port, GM_TXF_COL); stats->rx_length_errors = get_stats32(hw, port, GM_RXF_LNG_ERR); stats->rx_crc_errors = get_stats32(hw, port, GM_RXF_FCS_ERR); stats->rx_frame_errors = get_stats32(hw, port, GM_RXF_SHT) + get_stats32(hw, port, GM_RXE_FRAG); stats->rx_over_errors = get_stats32(hw, port, GM_RXE_FIFO_OV); stats->rx_dropped = dev->stats.rx_dropped; stats->rx_fifo_errors = dev->stats.rx_fifo_errors; stats->tx_fifo_errors = dev->stats.tx_fifo_errors; } /* Can have one global because blinking is controlled by * ethtool and that is always under RTNL mutex */ static void sky2_led(struct sky2_port *sky2, enum led_mode mode) { struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; spin_lock_bh(&sky2->phy_lock); if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_SUPR) { u16 pg; pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); 
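/* save the current PHY register page, switch to page 3 for the LED control writes below, and restore it afterwards */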
gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); switch (mode) { case MO_LED_OFF: gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, PHY_M_LEDC_LOS_CTRL(8) | PHY_M_LEDC_INIT_CTRL(8) | PHY_M_LEDC_STA1_CTRL(8) | PHY_M_LEDC_STA0_CTRL(8)); break; case MO_LED_ON: gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, PHY_M_LEDC_LOS_CTRL(9) | PHY_M_LEDC_INIT_CTRL(9) | PHY_M_LEDC_STA1_CTRL(9) | PHY_M_LEDC_STA0_CTRL(9)); break; case MO_LED_BLINK: gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, PHY_M_LEDC_LOS_CTRL(0xa) | PHY_M_LEDC_INIT_CTRL(0xa) | PHY_M_LEDC_STA1_CTRL(0xa) | PHY_M_LEDC_STA0_CTRL(0xa)); break; case MO_LED_NORM: gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, PHY_M_LEDC_LOS_CTRL(1) | PHY_M_LEDC_INIT_CTRL(8) | PHY_M_LEDC_STA1_CTRL(7) | PHY_M_LEDC_STA0_CTRL(7)); } gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); } else gm_phy_write(hw, port, PHY_MARV_LED_OVER, PHY_M_LED_MO_DUP(mode) | PHY_M_LED_MO_10(mode) | PHY_M_LED_MO_100(mode) | PHY_M_LED_MO_1000(mode) | PHY_M_LED_MO_RX(mode) | PHY_M_LED_MO_TX(mode)); spin_unlock_bh(&sky2->phy_lock); } /* blink LED's for finding board */ static int sky2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) { struct sky2_port *sky2 = netdev_priv(dev); switch (state) { case ETHTOOL_ID_ACTIVE: return 1; /* cycle on/off once per second */ case ETHTOOL_ID_INACTIVE: sky2_led(sky2, MO_LED_NORM); break; case ETHTOOL_ID_ON: sky2_led(sky2, MO_LED_ON); break; case ETHTOOL_ID_OFF: sky2_led(sky2, MO_LED_OFF); break; } return 0; } static void sky2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *ecmd) { struct sky2_port *sky2 = netdev_priv(dev); switch (sky2->flow_mode) { case FC_NONE: ecmd->tx_pause = ecmd->rx_pause = 0; break; case FC_TX: ecmd->tx_pause = 1, ecmd->rx_pause = 0; break; case FC_RX: ecmd->tx_pause = 0, ecmd->rx_pause = 1; break; case FC_BOTH: ecmd->tx_pause = ecmd->rx_pause = 1; } ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_PAUSE) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; } static int sky2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *ecmd) { struct sky2_port *sky2 = netdev_priv(dev); if (ecmd->autoneg == AUTONEG_ENABLE) sky2->flags |= SKY2_FLAG_AUTO_PAUSE; else sky2->flags &= ~SKY2_FLAG_AUTO_PAUSE; sky2->flow_mode = sky2_flow(ecmd->rx_pause, ecmd->tx_pause); if (netif_running(dev)) sky2_phy_reinit(sky2); return 0; } static int sky2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ecmd, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_STOP) ecmd->tx_coalesce_usecs = 0; else { u32 clks = sky2_read32(hw, STAT_TX_TIMER_INI); ecmd->tx_coalesce_usecs = sky2_clk2us(hw, clks); } ecmd->tx_max_coalesced_frames = sky2_read16(hw, STAT_TX_IDX_TH); if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_STOP) ecmd->rx_coalesce_usecs = 0; else { u32 clks = sky2_read32(hw, STAT_LEV_TIMER_INI); ecmd->rx_coalesce_usecs = sky2_clk2us(hw, clks); } ecmd->rx_max_coalesced_frames = sky2_read8(hw, STAT_FIFO_WM); if (sky2_read8(hw, STAT_ISR_TIMER_CTRL) == TIM_STOP) ecmd->rx_coalesce_usecs_irq = 0; else { u32 clks = sky2_read32(hw, STAT_ISR_TIMER_INI); ecmd->rx_coalesce_usecs_irq = sky2_clk2us(hw, clks); } ecmd->rx_max_coalesced_frames_irq = sky2_read8(hw, STAT_FIFO_ISR_WM); return 0; } /* Note: this affect both ports */ static int sky2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ecmd, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; const u32 tmax = sky2_clk2us(hw, 0x0ffffff); if (ecmd->tx_coalesce_usecs > tmax || ecmd->rx_coalesce_usecs > tmax || ecmd->rx_coalesce_usecs_irq > tmax) return -EINVAL; if (ecmd->tx_max_coalesced_frames >= sky2->tx_ring_size-1) return -EINVAL; if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING) return -EINVAL; if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING) return -EINVAL; if (ecmd->tx_coalesce_usecs == 0) sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP); else { sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, ecmd->tx_coalesce_usecs)); sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); } sky2_write16(hw, STAT_TX_IDX_TH, ecmd->tx_max_coalesced_frames); if (ecmd->rx_coalesce_usecs == 0) sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP); else { sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, ecmd->rx_coalesce_usecs)); sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START); } sky2_write8(hw, STAT_FIFO_WM, ecmd->rx_max_coalesced_frames); if (ecmd->rx_coalesce_usecs_irq == 0) sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_STOP); else { sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, ecmd->rx_coalesce_usecs_irq)); sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START); } sky2_write8(hw, STAT_FIFO_ISR_WM, ecmd->rx_max_coalesced_frames_irq); return 0; } /* * Hardware is limited to min of 128 and max of 2048 for ring size * and rounded up to next power of two * to avoid division in modulus calculation */ static unsigned long roundup_ring_size(unsigned long pending) { return max(128ul, roundup_pow_of_two(pending+1)); } static void sky2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering, struct kernel_ethtool_ringparam *kernel_ering, struct netlink_ext_ack *extack) { struct sky2_port *sky2 = netdev_priv(dev); ering->rx_max_pending = RX_MAX_PENDING; ering->tx_max_pending = TX_MAX_PENDING; ering->rx_pending = sky2->rx_pending; 
ering->tx_pending = sky2->tx_pending; } static int sky2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering, struct kernel_ethtool_ringparam *kernel_ering, struct netlink_ext_ack *extack) { struct sky2_port *sky2 = netdev_priv(dev); if (ering->rx_pending > RX_MAX_PENDING || ering->rx_pending < 8 || ering->tx_pending < TX_MIN_PENDING || ering->tx_pending > TX_MAX_PENDING) return -EINVAL; sky2_detach(dev); sky2->rx_pending = ering->rx_pending; sky2->tx_pending = ering->tx_pending; sky2->tx_ring_size = roundup_ring_size(sky2->tx_pending); return sky2_reattach(dev); } static int sky2_get_regs_len(struct net_device *dev) { return 0x4000; } static int sky2_reg_access_ok(struct sky2_hw *hw, unsigned int b) { /* This complicated switch statement is to make sure and * only access regions that are unreserved. * Some blocks are only valid on dual port cards. */ switch (b) { /* second port */ case 5: /* Tx Arbiter 2 */ case 9: /* RX2 */ case 14 ... 15: /* TX2 */ case 17: case 19: /* Ram Buffer 2 */ case 22 ... 23: /* Tx Ram Buffer 2 */ case 25: /* Rx MAC Fifo 1 */ case 27: /* Tx MAC Fifo 2 */ case 31: /* GPHY 2 */ case 40 ... 47: /* Pattern Ram 2 */ case 52: case 54: /* TCP Segmentation 2 */ case 112 ... 116: /* GMAC 2 */ return hw->ports > 1; case 0: /* Control */ case 2: /* Mac address */ case 4: /* Tx Arbiter 1 */ case 7: /* PCI express reg */ case 8: /* RX1 */ case 12 ... 13: /* TX1 */ case 16: case 18:/* Rx Ram Buffer 1 */ case 20 ... 21: /* Tx Ram Buffer 1 */ case 24: /* Rx MAC Fifo 1 */ case 26: /* Tx MAC Fifo 1 */ case 28 ... 29: /* Descriptor and status unit */ case 30: /* GPHY 1*/ case 32 ... 39: /* Pattern Ram 1 */ case 48: case 50: /* TCP Segmentation 1 */ case 56 ... 60: /* PCI space */ case 80 ... 84: /* GMAC 1 */ return 1; default: return 0; } } /* * Returns copy of control register region * Note: ethtool_get_regs always provides full size (16k) buffer */ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { const struct sky2_port *sky2 = netdev_priv(dev); const void __iomem *io = sky2->hw->regs; unsigned int b; regs->version = 1; for (b = 0; b < 128; b++) { /* skip poisonous diagnostic ram region in block 3 */ if (b == 3) memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10); else if (sky2_reg_access_ok(sky2->hw, b)) memcpy_fromio(p, io, 128); else memset(p, 0, 128); p += 128; io += 128; } } static int sky2_get_eeprom_len(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; u16 reg2; reg2 = sky2_pci_read16(hw, PCI_DEV_REG2); return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); } static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) { struct sky2_port *sky2 = netdev_priv(dev); int rc; eeprom->magic = SKY2_EEPROM_MAGIC; rc = pci_read_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len, data); if (rc < 0) return rc; eeprom->len = rc; return 0; } static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) { struct sky2_port *sky2 = netdev_priv(dev); int rc; if (eeprom->magic != SKY2_EEPROM_MAGIC) return -EINVAL; rc = pci_write_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len, data); return rc < 0 ? rc : 0; } static netdev_features_t sky2_fix_features(struct net_device *dev, netdev_features_t features) { const struct sky2_port *sky2 = netdev_priv(dev); const struct sky2_hw *hw = sky2->hw; /* In order to do Jumbo packets on these chips, need to turn off the * transmit store/forward. Therefore checksum offload won't work. 
*/ if (dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U) { netdev_info(dev, "checksum offload not possible with jumbo frames\n"); features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_CSUM_MASK); } /* Some hardware requires receive checksum for RSS to work. */ if ( (features & NETIF_F_RXHASH) && !(features & NETIF_F_RXCSUM) && (sky2->hw->flags & SKY2_HW_RSS_CHKSUM)) { netdev_info(dev, "receive hashing forces receive checksum\n"); features |= NETIF_F_RXCSUM; } return features; } static int sky2_set_features(struct net_device *dev, netdev_features_t features) { struct sky2_port *sky2 = netdev_priv(dev); netdev_features_t changed = dev->features ^ features; if ((changed & NETIF_F_RXCSUM) && !(sky2->hw->flags & SKY2_HW_NEW_LE)) { sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), (features & NETIF_F_RXCSUM) ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); } if (changed & NETIF_F_RXHASH) rx_set_rss(dev, features); if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX)) sky2_vlan_mode(dev, features); return 0; } static const struct ethtool_ops sky2_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_RX_USECS_IRQ | ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ, .get_drvinfo = sky2_get_drvinfo, .get_wol = sky2_get_wol, .set_wol = sky2_set_wol, .get_msglevel = sky2_get_msglevel, .set_msglevel = sky2_set_msglevel, .nway_reset = sky2_nway_reset, .get_regs_len = sky2_get_regs_len, .get_regs = sky2_get_regs, .get_link = ethtool_op_get_link, .get_eeprom_len = sky2_get_eeprom_len, .get_eeprom = sky2_get_eeprom, .set_eeprom = sky2_set_eeprom, .get_strings = sky2_get_strings, .get_coalesce = sky2_get_coalesce, .set_coalesce = sky2_set_coalesce, .get_ringparam = sky2_get_ringparam, .set_ringparam = sky2_set_ringparam, .get_pauseparam = sky2_get_pauseparam, .set_pauseparam = sky2_set_pauseparam, .set_phys_id = sky2_set_phys_id, .get_sset_count = sky2_get_sset_count, .get_ethtool_stats = sky2_get_ethtool_stats, .get_link_ksettings = sky2_get_link_ksettings, .set_link_ksettings = sky2_set_link_ksettings, }; #ifdef CONFIG_SKY2_DEBUG static struct dentry *sky2_debug; static int sky2_debug_show(struct seq_file *seq, void *v) { struct net_device *dev = seq->private; const struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; unsigned idx, last; int sop; seq_printf(seq, "IRQ src=%x mask=%x control=%x\n", sky2_read32(hw, B0_ISRC), sky2_read32(hw, B0_IMSK), sky2_read32(hw, B0_Y2_SP_ICR)); if (!netif_running(dev)) { seq_puts(seq, "network not running\n"); return 0; } napi_disable(&hw->napi); last = sky2_read16(hw, STAT_PUT_IDX); seq_printf(seq, "Status ring %u\n", hw->st_size); if (hw->st_idx == last) seq_puts(seq, "Status ring (empty)\n"); else { seq_puts(seq, "Status ring\n"); for (idx = hw->st_idx; idx != last && idx < hw->st_size; idx = RING_NEXT(idx, hw->st_size)) { const struct sky2_status_le *le = hw->st_le + idx; seq_printf(seq, "[%d] %#x %d %#x\n", idx, le->opcode, le->length, le->status); } seq_puts(seq, "\n"); } seq_printf(seq, "Tx ring pending=%u...%u report=%d done=%d\n", sky2->tx_cons, sky2->tx_prod, sky2_read16(hw, port == 0 ? 
STAT_TXA1_RIDX : STAT_TXA2_RIDX), sky2_read16(hw, Q_ADDR(txqaddr[port], Q_DONE))); /* Dump contents of tx ring */ sop = 1; for (idx = sky2->tx_next; idx != sky2->tx_prod && idx < sky2->tx_ring_size; idx = RING_NEXT(idx, sky2->tx_ring_size)) { const struct sky2_tx_le *le = sky2->tx_le + idx; u32 a = le32_to_cpu(le->addr); if (sop) seq_printf(seq, "%u:", idx); sop = 0; switch (le->opcode & ~HW_OWNER) { case OP_ADDR64: seq_printf(seq, " %#x:", a); break; case OP_LRGLEN: seq_printf(seq, " mtu=%d", a); break; case OP_VLAN: seq_printf(seq, " vlan=%d", be16_to_cpu(le->length)); break; case OP_TCPLISW: seq_printf(seq, " csum=%#x", a); break; case OP_LARGESEND: seq_printf(seq, " tso=%#x(%d)", a, le16_to_cpu(le->length)); break; case OP_PACKET: seq_printf(seq, " %#x(%d)", a, le16_to_cpu(le->length)); break; case OP_BUFFER: seq_printf(seq, " frag=%#x(%d)", a, le16_to_cpu(le->length)); break; default: seq_printf(seq, " op=%#x,%#x(%d)", le->opcode, a, le16_to_cpu(le->length)); } if (le->ctrl & EOP) { seq_putc(seq, '\n'); sop = 1; } } seq_printf(seq, "\nRx ring hw get=%d put=%d last=%d\n", sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_GET_IDX)), sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_PUT_IDX)), sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_LAST_IDX))); sky2_read32(hw, B0_Y2_SP_LISR); napi_enable(&hw->napi); return 0; } DEFINE_SHOW_ATTRIBUTE(sky2_debug); /* * Use network device events to create/remove/rename * debugfs file entries */ static int sky2_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct sky2_port *sky2 = netdev_priv(dev); if (dev->netdev_ops->ndo_open != sky2_open || !sky2_debug) return NOTIFY_DONE; switch (event) { case NETDEV_CHANGENAME: if (sky2->debugfs) { sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs, sky2_debug, dev->name); } break; case NETDEV_GOING_DOWN: if (sky2->debugfs) { netdev_printk(KERN_DEBUG, dev, "remove debugfs\n"); debugfs_remove(sky2->debugfs); sky2->debugfs = NULL; } break; case NETDEV_UP: sky2->debugfs = debugfs_create_file(dev->name, 0444, sky2_debug, dev, &sky2_debug_fops); if (IS_ERR(sky2->debugfs)) sky2->debugfs = NULL; } return NOTIFY_DONE; } static struct notifier_block sky2_notifier = { .notifier_call = sky2_device_event, }; static __init void sky2_debug_init(void) { struct dentry *ent; ent = debugfs_create_dir("sky2", NULL); if (IS_ERR(ent)) return; sky2_debug = ent; register_netdevice_notifier(&sky2_notifier); } static __exit void sky2_debug_cleanup(void) { if (sky2_debug) { unregister_netdevice_notifier(&sky2_notifier); debugfs_remove(sky2_debug); sky2_debug = NULL; } } #else #define sky2_debug_init() #define sky2_debug_cleanup() #endif /* Two copies of network device operations to handle special case of * not allowing netpoll on second port */ static const struct net_device_ops sky2_netdev_ops[2] = { { .ndo_open = sky2_open, .ndo_stop = sky2_close, .ndo_start_xmit = sky2_xmit_frame, .ndo_eth_ioctl = sky2_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = sky2_set_mac_address, .ndo_set_rx_mode = sky2_set_multicast, .ndo_change_mtu = sky2_change_mtu, .ndo_fix_features = sky2_fix_features, .ndo_set_features = sky2_set_features, .ndo_tx_timeout = sky2_tx_timeout, .ndo_get_stats64 = sky2_get_stats, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = sky2_netpoll, #endif }, { .ndo_open = sky2_open, .ndo_stop = sky2_close, .ndo_start_xmit = sky2_xmit_frame, .ndo_eth_ioctl = sky2_ioctl, .ndo_validate_addr = 
eth_validate_addr, .ndo_set_mac_address = sky2_set_mac_address, .ndo_set_rx_mode = sky2_set_multicast, .ndo_change_mtu = sky2_change_mtu, .ndo_fix_features = sky2_fix_features, .ndo_set_features = sky2_set_features, .ndo_tx_timeout = sky2_tx_timeout, .ndo_get_stats64 = sky2_get_stats, }, }; /* Initialize network device */ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port, int highmem, int wol) { struct sky2_port *sky2; struct net_device *dev = alloc_etherdev(sizeof(*sky2)); int ret; if (!dev) return NULL; SET_NETDEV_DEV(dev, &hw->pdev->dev); dev->irq = hw->pdev->irq; dev->ethtool_ops = &sky2_ethtool_ops; dev->watchdog_timeo = TX_WATCHDOG; dev->netdev_ops = &sky2_netdev_ops[port]; sky2 = netdev_priv(dev); sky2->netdev = dev; sky2->hw = hw; sky2->msg_enable = netif_msg_init(debug, default_msg); u64_stats_init(&sky2->tx_stats.syncp); u64_stats_init(&sky2->rx_stats.syncp); /* Auto speed and flow control */ sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE; if (hw->chip_id != CHIP_ID_YUKON_XL) dev->hw_features |= NETIF_F_RXCSUM; sky2->flow_mode = FC_BOTH; sky2->duplex = -1; sky2->speed = -1; sky2->advertising = sky2_supported_modes(hw); sky2->wol = wol; spin_lock_init(&sky2->phy_lock); sky2->tx_pending = TX_DEF_PENDING; sky2->tx_ring_size = roundup_ring_size(TX_DEF_PENDING); sky2->rx_pending = RX_DEF_PENDING; hw->dev[port] = dev; sky2->port = port; dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO; if (highmem) dev->features |= NETIF_F_HIGHDMA; /* Enable receive hashing unless hardware is known broken */ if (!(hw->flags & SKY2_HW_RSS_BROKEN)) dev->hw_features |= NETIF_F_RXHASH; if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) { dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; dev->vlan_features |= SKY2_VLAN_OFFLOADS; } dev->features |= dev->hw_features; /* MTU range: 60 - 1500 or 9000 */ dev->min_mtu = ETH_ZLEN; if (hw->chip_id == CHIP_ID_YUKON_FE || hw->chip_id == CHIP_ID_YUKON_FE_P) dev->max_mtu = ETH_DATA_LEN; else dev->max_mtu = ETH_JUMBO_MTU; /* try to get mac address in the following order: * 1) from device tree data * 2) from internal registers set by bootloader */ ret = of_get_ethdev_address(hw->pdev->dev.of_node, dev); if (ret) { u8 addr[ETH_ALEN]; memcpy_fromio(addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN); eth_hw_addr_set(dev, addr); } /* if the address is invalid, use a random value */ if (!is_valid_ether_addr(dev->dev_addr)) { struct sockaddr sa = { AF_UNSPEC }; dev_warn(&hw->pdev->dev, "Invalid MAC address, defaulting to random\n"); eth_hw_addr_random(dev); memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN); if (sky2_set_mac_address(dev, &sa)) dev_warn(&hw->pdev->dev, "Failed to set MAC address.\n"); } return dev; } static void sky2_show_addr(struct net_device *dev) { const struct sky2_port *sky2 = netdev_priv(dev); netif_info(sky2, probe, dev, "addr %pM\n", dev->dev_addr); } /* Handle software interrupt used during MSI test */ static irqreturn_t sky2_test_intr(int irq, void *dev_id) { struct sky2_hw *hw = dev_id; u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2); if (status == 0) return IRQ_NONE; if (status & Y2_IS_IRQ_SW) { hw->flags |= SKY2_HW_USE_MSI; wake_up(&hw->msi_wait); sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); } sky2_write32(hw, B0_Y2_SP_ICR, 2); return IRQ_HANDLED; } /* Test interrupt path by forcing a software IRQ */ static int sky2_test_msi(struct sky2_hw *hw) { struct pci_dev *pdev = hw->pdev; int err; init_waitqueue_head(&hw->msi_wait); err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw); if (err) { 
dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); return err; } sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW); sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ); sky2_read8(hw, B0_CTST); wait_event_timeout(hw->msi_wait, (hw->flags & SKY2_HW_USE_MSI), HZ/10); if (!(hw->flags & SKY2_HW_USE_MSI)) { /* MSI test failed, go back to INTx mode */ dev_info(&pdev->dev, "No interrupt generated using MSI, " "switching to INTx mode.\n"); err = -EOPNOTSUPP; sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); } sky2_write32(hw, B0_IMSK, 0); sky2_read32(hw, B0_IMSK); free_irq(pdev->irq, hw); return err; } /* This driver supports yukon2 chipset only */ static const char *sky2_name(u8 chipid, char *buf, int sz) { static const char *const name[] = { "XL", /* 0xb3 */ "EC Ultra", /* 0xb4 */ "Extreme", /* 0xb5 */ "EC", /* 0xb6 */ "FE", /* 0xb7 */ "FE+", /* 0xb8 */ "Supreme", /* 0xb9 */ "UL 2", /* 0xba */ "Unknown", /* 0xbb */ "Optima", /* 0xbc */ "OptimaEEE", /* 0xbd */ "Optima 2", /* 0xbe */ }; if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_OP_2) snprintf(buf, sz, "%s", name[chipid - CHIP_ID_YUKON_XL]); else snprintf(buf, sz, "(chip %#x)", chipid); return buf; } static const struct dmi_system_id msi_blacklist[] = { { .ident = "Dell Inspiron 1545", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1545"), }, }, { .ident = "Gateway P-79", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Gateway"), DMI_MATCH(DMI_PRODUCT_NAME, "P-79"), }, }, { .ident = "ASUS P5W DH Deluxe", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTEK COMPUTER INC"), DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"), }, }, { .ident = "ASUS P6T", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "P6T"), }, }, { .ident = "ASUS P6X", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "P6X"), }, }, {} }; static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev, *dev1; struct sky2_hw *hw; int err, using_dac = 0, wol_default; u32 reg; char buf1[16]; err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "cannot enable PCI device\n"); goto err_out; } /* Get configuration information * Note: only regular PCI config access once to test for HW issues * other PCI access through shared memory for speed and to * avoid MMCONFIG problems. */ err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg); if (err) { dev_err(&pdev->dev, "PCI read config failed\n"); goto err_out_disable; } if (~reg == 0) { dev_err(&pdev->dev, "PCI configuration read error\n"); err = -EIO; goto err_out_disable; } err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_err(&pdev->dev, "cannot obtain PCI resources\n"); goto err_out_disable; } pci_set_master(pdev); if (sizeof(dma_addr_t) > sizeof(u32) && !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { using_dac = 1; err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); if (err < 0) { dev_err(&pdev->dev, "unable to obtain 64 bit DMA " "for consistent allocations\n"); goto err_out_free_regions; } } else { err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "no usable DMA configuration\n"); goto err_out_free_regions; } } #ifdef __BIG_ENDIAN /* The sk98lin vendor driver uses hardware byte swapping but * this driver uses software swapping. */ reg &= ~PCI_REV_DESC; err = pci_write_config_dword(pdev, PCI_DEV_REG2, reg); if (err) { dev_err(&pdev->dev, "PCI write config failed\n"); goto err_out_free_regions; } #endif wol_default = device_may_wakeup(&pdev->dev) ? 
WAKE_MAGIC : 0; err = -ENOMEM; hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:") + strlen(pci_name(pdev)) + 1, GFP_KERNEL); if (!hw) goto err_out_free_regions; hw->pdev = pdev; sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev)); hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000); if (!hw->regs) { dev_err(&pdev->dev, "cannot map device registers\n"); goto err_out_free_hw; } err = sky2_init(hw); if (err) goto err_out_iounmap; /* ring for status responses */ hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING); hw->st_le = dma_alloc_coherent(&pdev->dev, hw->st_size * sizeof(struct sky2_status_le), &hw->st_dma, GFP_KERNEL); if (!hw->st_le) { err = -ENOMEM; goto err_out_reset; } dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n", sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev); sky2_reset(hw); dev = sky2_init_netdev(hw, 0, using_dac, wol_default); if (!dev) { err = -ENOMEM; goto err_out_free_pci; } if (disable_msi == -1) disable_msi = !!dmi_check_system(msi_blacklist); if (!disable_msi && pci_enable_msi(pdev) == 0) { err = sky2_test_msi(hw); if (err) { pci_disable_msi(pdev); if (err != -EOPNOTSUPP) goto err_out_free_netdev; } } netif_napi_add(dev, &hw->napi, sky2_poll); err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "cannot register net device\n"); goto err_out_free_netdev; } netif_carrier_off(dev); sky2_show_addr(dev); if (hw->ports > 1) { dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default); if (!dev1) { err = -ENOMEM; goto err_out_unregister; } err = register_netdev(dev1); if (err) { dev_err(&pdev->dev, "cannot register second net device\n"); goto err_out_free_dev1; } err = sky2_setup_irq(hw, hw->irq_name); if (err) goto err_out_unregister_dev1; sky2_show_addr(dev1); } timer_setup(&hw->watchdog_timer, sky2_watchdog, 0); INIT_WORK(&hw->restart_work, sky2_restart); pci_set_drvdata(pdev, hw); pdev->d3hot_delay = 300; return 0; err_out_unregister_dev1: unregister_netdev(dev1); err_out_free_dev1: free_netdev(dev1); err_out_unregister: unregister_netdev(dev); err_out_free_netdev: if (hw->flags & SKY2_HW_USE_MSI) pci_disable_msi(pdev); free_netdev(dev); err_out_free_pci: dma_free_coherent(&pdev->dev, hw->st_size * sizeof(struct sky2_status_le), hw->st_le, hw->st_dma); err_out_reset: sky2_write8(hw, B0_CTST, CS_RST_SET); err_out_iounmap: iounmap(hw->regs); err_out_free_hw: kfree(hw); err_out_free_regions: pci_release_regions(pdev); err_out_disable: pci_disable_device(pdev); err_out: return err; } static void sky2_remove(struct pci_dev *pdev) { struct sky2_hw *hw = pci_get_drvdata(pdev); int i; if (!hw) return; timer_shutdown_sync(&hw->watchdog_timer); cancel_work_sync(&hw->restart_work); for (i = hw->ports-1; i >= 0; --i) unregister_netdev(hw->dev[i]); sky2_write32(hw, B0_IMSK, 0); sky2_read32(hw, B0_IMSK); sky2_power_aux(hw); sky2_write8(hw, B0_CTST, CS_RST_SET); sky2_read8(hw, B0_CTST); if (hw->ports > 1) { napi_disable(&hw->napi); free_irq(pdev->irq, hw); } if (hw->flags & SKY2_HW_USE_MSI) pci_disable_msi(pdev); dma_free_coherent(&pdev->dev, hw->st_size * sizeof(struct sky2_status_le), hw->st_le, hw->st_dma); pci_release_regions(pdev); pci_disable_device(pdev); for (i = hw->ports-1; i >= 0; --i) free_netdev(hw->dev[i]); iounmap(hw->regs); kfree(hw); } static int sky2_suspend(struct device *dev) { struct sky2_hw *hw = dev_get_drvdata(dev); int i; if (!hw) return 0; del_timer_sync(&hw->watchdog_timer); cancel_work_sync(&hw->restart_work); rtnl_lock(); sky2_all_down(hw); for (i = 0; i < hw->ports; i++) { struct net_device *dev = 
hw->dev[i]; struct sky2_port *sky2 = netdev_priv(dev); if (sky2->wol) sky2_wol_init(sky2); } sky2_power_aux(hw); rtnl_unlock(); return 0; } #ifdef CONFIG_PM_SLEEP static int sky2_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct sky2_hw *hw = pci_get_drvdata(pdev); int err; if (!hw) return 0; /* Re-enable all clocks */ err = pci_write_config_dword(pdev, PCI_DEV_REG3, 0); if (err) { dev_err(&pdev->dev, "PCI write config failed\n"); goto out; } rtnl_lock(); sky2_reset(hw); sky2_all_up(hw); rtnl_unlock(); return 0; out: dev_err(&pdev->dev, "resume failed (%d)\n", err); pci_disable_device(pdev); return err; } static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume); #define SKY2_PM_OPS (&sky2_pm_ops) #else #define SKY2_PM_OPS NULL #endif static void sky2_shutdown(struct pci_dev *pdev) { struct sky2_hw *hw = pci_get_drvdata(pdev); int port; for (port = 0; port < hw->ports; port++) { struct net_device *ndev = hw->dev[port]; rtnl_lock(); if (netif_running(ndev)) { dev_close(ndev); netif_device_detach(ndev); } rtnl_unlock(); } sky2_suspend(&pdev->dev); pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); pci_set_power_state(pdev, PCI_D3hot); } static struct pci_driver sky2_driver = { .name = DRV_NAME, .id_table = sky2_id_table, .probe = sky2_probe, .remove = sky2_remove, .shutdown = sky2_shutdown, .driver.pm = SKY2_PM_OPS, }; static int __init sky2_init_module(void) { pr_info("driver version " DRV_VERSION "\n"); sky2_debug_init(); return pci_register_driver(&sky2_driver); } static void __exit sky2_cleanup_module(void) { pci_unregister_driver(&sky2_driver); sky2_debug_cleanup(); } module_init(sky2_init_module); module_exit(sky2_cleanup_module); MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver"); MODULE_AUTHOR("Stephen Hemminger <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
linux-master
drivers/net/ethernet/marvell/sky2.c
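The sky2 code above leans on two small pieces of arithmetic that are easy to check in isolation: roundup_ring_size() forces the ring length to a power of two (minimum 128) so ring indices can wrap with a mask instead of a modulus, and sky2_add_filter() maps the low 6 bits of a multicast address hash onto one bit of the 64-bit GMAC hash filter. The sketch below is a hypothetical userspace demo of that arithmetic only; it takes the 6-bit hash as an argument instead of computing it with the kernel's ether_crc(), and the demo_* names are not part of the driver.

/* Standalone illustration of two sky2 helpers: power-of-two ring sizing
 * and multicast hash-filter bit placement.  Userspace sketch only; the
 * 6-bit hash is supplied by the caller rather than derived via ether_crc().
 */
#include <stdint.h>
#include <stdio.h>

/* Mirrors roundup_ring_size(): at least 128 entries, rounded up to the
 * next power of two so "idx & (size - 1)" can replace "idx % size".
 */
static unsigned long demo_roundup_ring_size(unsigned long pending)
{
	unsigned long size = 128;

	while (size < pending + 1)
		size <<= 1;
	return size;
}

/* Mirrors sky2_add_filter(): the low 6 bits of the address hash select
 * one of 64 filter bits, stored as an 8-byte array (bit n lives in
 * byte n/8, position n%8), later written to GM_MC_ADDR_H1..H4.
 */
static void demo_add_filter(uint8_t filter[8], uint32_t hash)
{
	uint32_t bit = hash & 63;

	filter[bit >> 3] |= 1u << (bit & 7);
}

int main(void)
{
	uint8_t filter[8] = { 0 };

	/* 510 pending entries -> 512-entry ring */
	printf("ring size for 510 pending: %lu\n",
	       demo_roundup_ring_size(510));

	/* hash 0x2a -> bit 42 -> byte 5, bit 2 -> filter[5] == 0x04 */
	demo_add_filter(filter, 0x2a);
	printf("filter[5] = 0x%02x\n", (unsigned int)filter[5]);
	return 0;
}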
// SPDX-License-Identifier: GPL-2.0-or-later /* * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports * Copyright (C) 2002 Matthew Dharm <[email protected]> * * Based on the 64360 driver from: * Copyright (C) 2002 Rabeeh Khoury <[email protected]> * Rabeeh Khoury <[email protected]> * * Copyright (C) 2003 PMC-Sierra, Inc., * written by Manish Lachwani * * Copyright (C) 2003 Ralf Baechle <[email protected]> * * Copyright (C) 2004-2006 MontaVista Software, Inc. * Dale Farnsworth <[email protected]> * * Copyright (C) 2004 Steven J. Hill <[email protected]> * <[email protected]> * * Copyright (C) 2007-2008 Marvell Semiconductor * Lennert Buytenhek <[email protected]> * * Copyright (C) 2013 Michael Stapelberg <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/dma-mapping.h> #include <linux/in.h> #include <linux/ip.h> #include <net/tso.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/etherdevice.h> #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/platform_device.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/workqueue.h> #include <linux/phy.h> #include <linux/mv643xx_eth.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_net.h> #include <linux/of_mdio.h> static char mv643xx_eth_driver_name[] = "mv643xx_eth"; static char mv643xx_eth_driver_version[] = "1.4"; /* * Registers shared between all ports. */ #define PHY_ADDR 0x0000 #define WINDOW_BASE(w) (0x0200 + ((w) << 3)) #define WINDOW_SIZE(w) (0x0204 + ((w) << 3)) #define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2)) #define WINDOW_BAR_ENABLE 0x0290 #define WINDOW_PROTECT(w) (0x0294 + ((w) << 4)) /* * Main per-port registers. These live at offset 0x0400 for * port #0, 0x0800 for port #1, and 0x0c00 for port #2. 
*/ #define PORT_CONFIG 0x0000 #define UNICAST_PROMISCUOUS_MODE 0x00000001 #define PORT_CONFIG_EXT 0x0004 #define MAC_ADDR_LOW 0x0014 #define MAC_ADDR_HIGH 0x0018 #define SDMA_CONFIG 0x001c #define TX_BURST_SIZE_16_64BIT 0x01000000 #define TX_BURST_SIZE_4_64BIT 0x00800000 #define BLM_TX_NO_SWAP 0x00000020 #define BLM_RX_NO_SWAP 0x00000010 #define RX_BURST_SIZE_16_64BIT 0x00000008 #define RX_BURST_SIZE_4_64BIT 0x00000004 #define PORT_SERIAL_CONTROL 0x003c #define SET_MII_SPEED_TO_100 0x01000000 #define SET_GMII_SPEED_TO_1000 0x00800000 #define SET_FULL_DUPLEX_MODE 0x00200000 #define MAX_RX_PACKET_9700BYTE 0x000a0000 #define DISABLE_AUTO_NEG_SPEED_GMII 0x00002000 #define DO_NOT_FORCE_LINK_FAIL 0x00000400 #define SERIAL_PORT_CONTROL_RESERVED 0x00000200 #define DISABLE_AUTO_NEG_FOR_FLOW_CTRL 0x00000008 #define DISABLE_AUTO_NEG_FOR_DUPLEX 0x00000004 #define FORCE_LINK_PASS 0x00000002 #define SERIAL_PORT_ENABLE 0x00000001 #define PORT_STATUS 0x0044 #define TX_FIFO_EMPTY 0x00000400 #define TX_IN_PROGRESS 0x00000080 #define PORT_SPEED_MASK 0x00000030 #define PORT_SPEED_1000 0x00000010 #define PORT_SPEED_100 0x00000020 #define PORT_SPEED_10 0x00000000 #define FLOW_CONTROL_ENABLED 0x00000008 #define FULL_DUPLEX 0x00000004 #define LINK_UP 0x00000002 #define TXQ_COMMAND 0x0048 #define TXQ_FIX_PRIO_CONF 0x004c #define PORT_SERIAL_CONTROL1 0x004c #define RGMII_EN 0x00000008 #define CLK125_BYPASS_EN 0x00000010 #define TX_BW_RATE 0x0050 #define TX_BW_MTU 0x0058 #define TX_BW_BURST 0x005c #define INT_CAUSE 0x0060 #define INT_TX_END 0x07f80000 #define INT_TX_END_0 0x00080000 #define INT_RX 0x000003fc #define INT_RX_0 0x00000004 #define INT_EXT 0x00000002 #define INT_CAUSE_EXT 0x0064 #define INT_EXT_LINK_PHY 0x00110000 #define INT_EXT_TX 0x000000ff #define INT_MASK 0x0068 #define INT_MASK_EXT 0x006c #define TX_FIFO_URGENT_THRESHOLD 0x0074 #define RX_DISCARD_FRAME_CNT 0x0084 #define RX_OVERRUN_FRAME_CNT 0x0088 #define TXQ_FIX_PRIO_CONF_MOVED 0x00dc #define TX_BW_RATE_MOVED 0x00e0 #define TX_BW_MTU_MOVED 0x00e8 #define TX_BW_BURST_MOVED 0x00ec #define RXQ_CURRENT_DESC_PTR(q) (0x020c + ((q) << 4)) #define RXQ_COMMAND 0x0280 #define TXQ_CURRENT_DESC_PTR(q) (0x02c0 + ((q) << 2)) #define TXQ_BW_TOKENS(q) (0x0300 + ((q) << 4)) #define TXQ_BW_CONF(q) (0x0304 + ((q) << 4)) #define TXQ_BW_WRR_CONF(q) (0x0308 + ((q) << 4)) /* * Misc per-port registers. */ #define MIB_COUNTERS(p) (0x1000 + ((p) << 7)) #define SPECIAL_MCAST_TABLE(p) (0x1400 + ((p) << 10)) #define OTHER_MCAST_TABLE(p) (0x1500 + ((p) << 10)) #define UNICAST_TABLE(p) (0x1600 + ((p) << 10)) /* * SDMA configuration register default value. */ #if defined(__BIG_ENDIAN) #define PORT_SDMA_CONFIG_DEFAULT_VALUE \ (RX_BURST_SIZE_4_64BIT | \ TX_BURST_SIZE_4_64BIT) #elif defined(__LITTLE_ENDIAN) #define PORT_SDMA_CONFIG_DEFAULT_VALUE \ (RX_BURST_SIZE_4_64BIT | \ BLM_RX_NO_SWAP | \ BLM_TX_NO_SWAP | \ TX_BURST_SIZE_4_64BIT) #else #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined #endif /* * Misc definitions. */ #define DEFAULT_RX_QUEUE_SIZE 128 #define DEFAULT_TX_QUEUE_SIZE 512 #define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) /* Max number of allowed TCP segments for software TSO */ #define MV643XX_MAX_TSO_SEGS 100 #define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) #define IS_TSO_HEADER(txq, addr) \ ((addr >= txq->tso_hdrs_dma) && \ (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) #define DESC_DMA_MAP_SINGLE 0 #define DESC_DMA_MAP_PAGE 1 /* * RX/TX descriptors. 
*/ #if defined(__BIG_ENDIAN) struct rx_desc { u16 byte_cnt; /* Descriptor buffer byte count */ u16 buf_size; /* Buffer size */ u32 cmd_sts; /* Descriptor command status */ u32 next_desc_ptr; /* Next descriptor pointer */ u32 buf_ptr; /* Descriptor buffer pointer */ }; struct tx_desc { u16 byte_cnt; /* buffer byte count */ u16 l4i_chk; /* CPU provided TCP checksum */ u32 cmd_sts; /* Command/status field */ u32 next_desc_ptr; /* Pointer to next descriptor */ u32 buf_ptr; /* pointer to buffer for this descriptor*/ }; #elif defined(__LITTLE_ENDIAN) struct rx_desc { u32 cmd_sts; /* Descriptor command status */ u16 buf_size; /* Buffer size */ u16 byte_cnt; /* Descriptor buffer byte count */ u32 buf_ptr; /* Descriptor buffer pointer */ u32 next_desc_ptr; /* Next descriptor pointer */ }; struct tx_desc { u32 cmd_sts; /* Command/status field */ u16 l4i_chk; /* CPU provided TCP checksum */ u16 byte_cnt; /* buffer byte count */ u32 buf_ptr; /* pointer to buffer for this descriptor*/ u32 next_desc_ptr; /* Pointer to next descriptor */ }; #else #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined #endif /* RX & TX descriptor command */ #define BUFFER_OWNED_BY_DMA 0x80000000 /* RX & TX descriptor status */ #define ERROR_SUMMARY 0x00000001 /* RX descriptor status */ #define LAYER_4_CHECKSUM_OK 0x40000000 #define RX_ENABLE_INTERRUPT 0x20000000 #define RX_FIRST_DESC 0x08000000 #define RX_LAST_DESC 0x04000000 #define RX_IP_HDR_OK 0x02000000 #define RX_PKT_IS_IPV4 0x01000000 #define RX_PKT_IS_ETHERNETV2 0x00800000 #define RX_PKT_LAYER4_TYPE_MASK 0x00600000 #define RX_PKT_LAYER4_TYPE_TCP_IPV4 0x00000000 #define RX_PKT_IS_VLAN_TAGGED 0x00080000 /* TX descriptor command */ #define TX_ENABLE_INTERRUPT 0x00800000 #define GEN_CRC 0x00400000 #define TX_FIRST_DESC 0x00200000 #define TX_LAST_DESC 0x00100000 #define ZERO_PADDING 0x00080000 #define GEN_IP_V4_CHECKSUM 0x00040000 #define GEN_TCP_UDP_CHECKSUM 0x00020000 #define UDP_FRAME 0x00010000 #define MAC_HDR_EXTRA_4_BYTES 0x00008000 #define GEN_TCP_UDP_CHK_FULL 0x00000400 #define MAC_HDR_EXTRA_8_BYTES 0x00000200 #define TX_IHL_SHIFT 11 /* global *******************************************************************/ struct mv643xx_eth_shared_private { /* * Ethernet controller base address. */ void __iomem *base; /* * Per-port MBUS window access register value. */ u32 win_protect; /* * Hardware-specific parameters. 
*/ int extended_rx_coal_limit; int tx_bw_control; int tx_csum_limit; struct clk *clk; }; #define TX_BW_CONTROL_ABSENT 0 #define TX_BW_CONTROL_OLD_LAYOUT 1 #define TX_BW_CONTROL_NEW_LAYOUT 2 static int mv643xx_eth_open(struct net_device *dev); static int mv643xx_eth_stop(struct net_device *dev); /* per-port *****************************************************************/ struct mib_counters { u64 good_octets_received; u32 bad_octets_received; u32 internal_mac_transmit_err; u32 good_frames_received; u32 bad_frames_received; u32 broadcast_frames_received; u32 multicast_frames_received; u32 frames_64_octets; u32 frames_65_to_127_octets; u32 frames_128_to_255_octets; u32 frames_256_to_511_octets; u32 frames_512_to_1023_octets; u32 frames_1024_to_max_octets; u64 good_octets_sent; u32 good_frames_sent; u32 excessive_collision; u32 multicast_frames_sent; u32 broadcast_frames_sent; u32 unrec_mac_control_received; u32 fc_sent; u32 good_fc_received; u32 bad_fc_received; u32 undersize_received; u32 fragments_received; u32 oversize_received; u32 jabber_received; u32 mac_receive_error; u32 bad_crc_event; u32 collision; u32 late_collision; /* Non MIB hardware counters */ u32 rx_discard; u32 rx_overrun; }; struct rx_queue { int index; int rx_ring_size; int rx_desc_count; int rx_curr_desc; int rx_used_desc; struct rx_desc *rx_desc_area; dma_addr_t rx_desc_dma; int rx_desc_area_size; struct sk_buff **rx_skb; }; struct tx_queue { int index; int tx_ring_size; int tx_desc_count; int tx_curr_desc; int tx_used_desc; int tx_stop_threshold; int tx_wake_threshold; char *tso_hdrs; dma_addr_t tso_hdrs_dma; struct tx_desc *tx_desc_area; char *tx_desc_mapping; /* array to track the type of the dma mapping */ dma_addr_t tx_desc_dma; int tx_desc_area_size; struct sk_buff_head tx_skb; unsigned long tx_packets; unsigned long tx_bytes; unsigned long tx_dropped; }; struct mv643xx_eth_private { struct mv643xx_eth_shared_private *shared; void __iomem *base; int port_num; struct net_device *dev; struct timer_list mib_counters_timer; spinlock_t mib_counters_lock; struct mib_counters mib_counters; struct work_struct tx_timeout_task; struct napi_struct napi; u32 int_mask; u8 oom; u8 work_link; u8 work_tx; u8 work_tx_end; u8 work_rx; u8 work_rx_refill; int skb_size; /* * RX state. */ int rx_ring_size; unsigned long rx_desc_sram_addr; int rx_desc_sram_size; int rxq_count; struct timer_list rx_oom; struct rx_queue rxq[8]; /* * TX state. */ int tx_ring_size; unsigned long tx_desc_sram_addr; int tx_desc_sram_size; int txq_count; struct tx_queue txq[8]; /* * Hardware-specific parameters. 
*/ struct clk *clk; unsigned int t_clk; }; /* port register accessors **************************************************/ static inline u32 rdl(struct mv643xx_eth_private *mp, int offset) { return readl(mp->shared->base + offset); } static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset) { return readl(mp->base + offset); } static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data) { writel(data, mp->shared->base + offset); } static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data) { writel(data, mp->base + offset); } /* rxq/txq helper functions *************************************************/ static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq) { return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]); } static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq) { return container_of(txq, struct mv643xx_eth_private, txq[txq->index]); } static void rxq_enable(struct rx_queue *rxq) { struct mv643xx_eth_private *mp = rxq_to_mp(rxq); wrlp(mp, RXQ_COMMAND, 1 << rxq->index); } static void rxq_disable(struct rx_queue *rxq) { struct mv643xx_eth_private *mp = rxq_to_mp(rxq); u8 mask = 1 << rxq->index; wrlp(mp, RXQ_COMMAND, mask << 8); while (rdlp(mp, RXQ_COMMAND) & mask) udelay(10); } static void txq_reset_hw_ptr(struct tx_queue *txq) { struct mv643xx_eth_private *mp = txq_to_mp(txq); u32 addr; addr = (u32)txq->tx_desc_dma; addr += txq->tx_curr_desc * sizeof(struct tx_desc); wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr); } static void txq_enable(struct tx_queue *txq) { struct mv643xx_eth_private *mp = txq_to_mp(txq); wrlp(mp, TXQ_COMMAND, 1 << txq->index); } static void txq_disable(struct tx_queue *txq) { struct mv643xx_eth_private *mp = txq_to_mp(txq); u8 mask = 1 << txq->index; wrlp(mp, TXQ_COMMAND, mask << 8); while (rdlp(mp, TXQ_COMMAND) & mask) udelay(10); } static void txq_maybe_wake(struct tx_queue *txq) { struct mv643xx_eth_private *mp = txq_to_mp(txq); struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); if (netif_tx_queue_stopped(nq)) { __netif_tx_lock(nq, smp_processor_id()); if (txq->tx_desc_count <= txq->tx_wake_threshold) netif_tx_wake_queue(nq); __netif_tx_unlock(nq); } } static int rxq_process(struct rx_queue *rxq, int budget) { struct mv643xx_eth_private *mp = rxq_to_mp(rxq); struct net_device_stats *stats = &mp->dev->stats; int rx; rx = 0; while (rx < budget && rxq->rx_desc_count) { struct rx_desc *rx_desc; unsigned int cmd_sts; struct sk_buff *skb; u16 byte_cnt; rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc]; cmd_sts = rx_desc->cmd_sts; if (cmd_sts & BUFFER_OWNED_BY_DMA) break; rmb(); skb = rxq->rx_skb[rxq->rx_curr_desc]; rxq->rx_skb[rxq->rx_curr_desc] = NULL; rxq->rx_curr_desc++; if (rxq->rx_curr_desc == rxq->rx_ring_size) rxq->rx_curr_desc = 0; dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr, rx_desc->buf_size, DMA_FROM_DEVICE); rxq->rx_desc_count--; rx++; mp->work_rx_refill |= 1 << rxq->index; byte_cnt = rx_desc->byte_cnt; /* * Update statistics. * * Note that the descriptor byte count includes 2 dummy * bytes automatically inserted by the hardware at the * start of the packet (which we don't count), and a 4 * byte CRC at the end of the packet (which we do count). */ stats->rx_packets++; stats->rx_bytes += byte_cnt - 2; /* * In case we received a packet without first / last bits * on, or the error summary bit is set, the packet needs * to be dropped. 
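* Dropped frames are counted in rx_dropped; those with the error summary bit set also bump rx_errors (see the err: label below).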
*/ if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY)) != (RX_FIRST_DESC | RX_LAST_DESC)) goto err; /* * The -4 is for the CRC in the trailer of the * received packet */ skb_put(skb, byte_cnt - 2 - 4); if (cmd_sts & LAYER_4_CHECKSUM_OK) skb->ip_summed = CHECKSUM_UNNECESSARY; skb->protocol = eth_type_trans(skb, mp->dev); napi_gro_receive(&mp->napi, skb); continue; err: stats->rx_dropped++; if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != (RX_FIRST_DESC | RX_LAST_DESC)) { if (net_ratelimit()) netdev_err(mp->dev, "received packet spanning multiple descriptors\n"); } if (cmd_sts & ERROR_SUMMARY) stats->rx_errors++; dev_kfree_skb(skb); } if (rx < budget) mp->work_rx &= ~(1 << rxq->index); return rx; } static int rxq_refill(struct rx_queue *rxq, int budget) { struct mv643xx_eth_private *mp = rxq_to_mp(rxq); int refilled; refilled = 0; while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) { struct sk_buff *skb; int rx; struct rx_desc *rx_desc; int size; skb = netdev_alloc_skb(mp->dev, mp->skb_size); if (skb == NULL) { mp->oom = 1; goto oom; } if (SKB_DMA_REALIGN) skb_reserve(skb, SKB_DMA_REALIGN); refilled++; rxq->rx_desc_count++; rx = rxq->rx_used_desc++; if (rxq->rx_used_desc == rxq->rx_ring_size) rxq->rx_used_desc = 0; rx_desc = rxq->rx_desc_area + rx; size = skb_end_pointer(skb) - skb->data; rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data, size, DMA_FROM_DEVICE); rx_desc->buf_size = size; rxq->rx_skb[rx] = skb; wmb(); rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT; wmb(); /* * The hardware automatically prepends 2 bytes of * dummy data to each received packet, so that the * IP header ends up 16-byte aligned. */ skb_reserve(skb, 2); } if (refilled < budget) mp->work_rx_refill &= ~(1 << rxq->index); oom: return refilled; } /* tx ***********************************************************************/ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) { int frag; for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; if (skb_frag_size(fragp) <= 8 && skb_frag_off(fragp) & 7) return 1; } return 0; } static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb, u16 *l4i_chk, u32 *command, int length) { int ret; u32 cmd = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) { int hdr_len; int tag_bytes; BUG_ON(skb->protocol != htons(ETH_P_IP) && skb->protocol != htons(ETH_P_8021Q)); hdr_len = (void *)ip_hdr(skb) - (void *)skb->data; tag_bytes = hdr_len - ETH_HLEN; if (length - hdr_len > mp->shared->tx_csum_limit || unlikely(tag_bytes & ~12)) { ret = skb_checksum_help(skb); if (!ret) goto no_csum; return ret; } if (tag_bytes & 4) cmd |= MAC_HDR_EXTRA_4_BYTES; if (tag_bytes & 8) cmd |= MAC_HDR_EXTRA_8_BYTES; cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL | GEN_IP_V4_CHECKSUM | ip_hdr(skb)->ihl << TX_IHL_SHIFT; /* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL * it seems we don't need to pass the initial checksum. 
*/ switch (ip_hdr(skb)->protocol) { case IPPROTO_UDP: cmd |= UDP_FRAME; *l4i_chk = 0; break; case IPPROTO_TCP: *l4i_chk = 0; break; default: WARN(1, "protocol not supported"); } } else { no_csum: /* Errata BTS #50, IHL must be 5 if no HW checksum */ cmd |= 5 << TX_IHL_SHIFT; } *command = cmd; return 0; } static inline int txq_put_data_tso(struct net_device *dev, struct tx_queue *txq, struct sk_buff *skb, char *data, int length, bool last_tcp, bool is_last) { int tx_index; u32 cmd_sts; struct tx_desc *desc; tx_index = txq->tx_curr_desc++; if (txq->tx_curr_desc == txq->tx_ring_size) txq->tx_curr_desc = 0; desc = &txq->tx_desc_area[tx_index]; txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; desc->l4i_chk = 0; desc->byte_cnt = length; if (length <= 8 && (uintptr_t)data & 0x7) { /* Copy unaligned small data fragment to TSO header data area */ memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE, data, length); desc->buf_ptr = txq->tso_hdrs_dma + tx_index * TSO_HEADER_SIZE; } else { /* Alignment is okay, map buffer and hand off to hardware */ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; desc->buf_ptr = dma_map_single(dev->dev.parent, data, length, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) { WARN(1, "dma_map_single failed!\n"); return -ENOMEM; } } cmd_sts = BUFFER_OWNED_BY_DMA; if (last_tcp) { /* last descriptor in the TCP packet */ cmd_sts |= ZERO_PADDING | TX_LAST_DESC; /* last descriptor in SKB */ if (is_last) cmd_sts |= TX_ENABLE_INTERRUPT; } desc->cmd_sts = cmd_sts; return 0; } static inline void txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length, u32 *first_cmd_sts, bool first_desc) { struct mv643xx_eth_private *mp = txq_to_mp(txq); int hdr_len = skb_tcp_all_headers(skb); int tx_index; struct tx_desc *desc; int ret; u32 cmd_csum = 0; u16 l4i_chk = 0; u32 cmd_sts; tx_index = txq->tx_curr_desc; desc = &txq->tx_desc_area[tx_index]; ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length); if (ret) WARN(1, "failed to prepare checksum!"); /* Should we set this? Can't use the value from skb_tx_csum() * as it's not the correct initial L4 checksum to use. */ desc->l4i_chk = 0; desc->byte_cnt = hdr_len; desc->buf_ptr = txq->tso_hdrs_dma + txq->tx_curr_desc * TSO_HEADER_SIZE; cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC | GEN_CRC; /* Defer updating the first command descriptor until all * following descriptors have been written. 
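 * Handing ownership of the first descriptor to the DMA engine too early would let the hardware walk a chain whose later entries are not yet valid.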
*/ if (first_desc) *first_cmd_sts = cmd_sts; else desc->cmd_sts = cmd_sts; txq->tx_curr_desc++; if (txq->tx_curr_desc == txq->tx_ring_size) txq->tx_curr_desc = 0; } static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb, struct net_device *dev) { struct mv643xx_eth_private *mp = txq_to_mp(txq); int hdr_len, total_len, data_left, ret; int desc_count = 0; struct tso_t tso; struct tx_desc *first_tx_desc; u32 first_cmd_sts = 0; /* Count needed descriptors */ if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) { netdev_dbg(dev, "not enough descriptors for TSO!\n"); return -EBUSY; } first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc]; /* Initialize the TSO handler, and prepare the first payload */ hdr_len = tso_start(skb, &tso); total_len = skb->len - hdr_len; while (total_len > 0) { bool first_desc = (desc_count == 0); char *hdr; data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); total_len -= data_left; desc_count++; /* prepare packet headers: MAC + IP + TCP */ hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE; tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts, first_desc); while (data_left > 0) { int size; desc_count++; size = min_t(int, tso.size, data_left); ret = txq_put_data_tso(dev, txq, skb, tso.data, size, size == data_left, total_len == 0); if (ret) goto err_release; data_left -= size; tso_build_data(skb, &tso, size); } } __skb_queue_tail(&txq->tx_skb, skb); skb_tx_timestamp(skb); /* ensure all other descriptors are written before first cmd_sts */ wmb(); first_tx_desc->cmd_sts = first_cmd_sts; /* clear TX_END status */ mp->work_tx_end &= ~(1 << txq->index); /* ensure all descriptors are written before poking hardware */ wmb(); txq_enable(txq); txq->tx_desc_count += desc_count; return 0; err_release: /* TODO: Release all used data descriptors; header descriptors must not * be DMA-unmapped. */ return ret; } static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) { struct mv643xx_eth_private *mp = txq_to_mp(txq); int nr_frags = skb_shinfo(skb)->nr_frags; int frag; for (frag = 0; frag < nr_frags; frag++) { skb_frag_t *this_frag; int tx_index; struct tx_desc *desc; this_frag = &skb_shinfo(skb)->frags[frag]; tx_index = txq->tx_curr_desc++; if (txq->tx_curr_desc == txq->tx_ring_size) txq->tx_curr_desc = 0; desc = &txq->tx_desc_area[tx_index]; txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE; /* * The last fragment will generate an interrupt * which will free the skb on TX completion. 
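 * (TX_ENABLE_INTERRUPT is therefore set only on the final fragment's descriptor.)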
*/ if (frag == nr_frags - 1) { desc->cmd_sts = BUFFER_OWNED_BY_DMA | ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT; } else { desc->cmd_sts = BUFFER_OWNED_BY_DMA; } desc->l4i_chk = 0; desc->byte_cnt = skb_frag_size(this_frag); desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent, this_frag, 0, desc->byte_cnt, DMA_TO_DEVICE); } } static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb, struct net_device *dev) { struct mv643xx_eth_private *mp = txq_to_mp(txq); int nr_frags = skb_shinfo(skb)->nr_frags; int tx_index; struct tx_desc *desc; u32 cmd_sts; u16 l4i_chk; int length, ret; cmd_sts = 0; l4i_chk = 0; if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { if (net_ratelimit()) netdev_err(dev, "tx queue full?!\n"); return -EBUSY; } ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len); if (ret) return ret; cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA; tx_index = txq->tx_curr_desc++; if (txq->tx_curr_desc == txq->tx_ring_size) txq->tx_curr_desc = 0; desc = &txq->tx_desc_area[tx_index]; txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; if (nr_frags) { txq_submit_frag_skb(txq, skb); length = skb_headlen(skb); } else { cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT; length = skb->len; } desc->l4i_chk = l4i_chk; desc->byte_cnt = length; desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data, length, DMA_TO_DEVICE); __skb_queue_tail(&txq->tx_skb, skb); skb_tx_timestamp(skb); /* ensure all other descriptors are written before first cmd_sts */ wmb(); desc->cmd_sts = cmd_sts; /* clear TX_END status */ mp->work_tx_end &= ~(1 << txq->index); /* ensure all descriptors are written before poking hardware */ wmb(); txq_enable(txq); txq->tx_desc_count += nr_frags + 1; return 0; } static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev) { struct mv643xx_eth_private *mp = netdev_priv(dev); int length, queue, ret; struct tx_queue *txq; struct netdev_queue *nq; queue = skb_get_queue_mapping(skb); txq = mp->txq + queue; nq = netdev_get_tx_queue(dev, queue); if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { netdev_printk(KERN_DEBUG, dev, "failed to linearize skb with tiny unaligned fragment\n"); return NETDEV_TX_BUSY; } length = skb->len; if (skb_is_gso(skb)) ret = txq_submit_tso(txq, skb, dev); else ret = txq_submit_skb(txq, skb, dev); if (!ret) { txq->tx_bytes += length; txq->tx_packets++; if (txq->tx_desc_count >= txq->tx_stop_threshold) netif_tx_stop_queue(nq); } else { txq->tx_dropped++; dev_kfree_skb_any(skb); } return NETDEV_TX_OK; } /* tx napi ******************************************************************/ static void txq_kick(struct tx_queue *txq) { struct mv643xx_eth_private *mp = txq_to_mp(txq); struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); u32 hw_desc_ptr; u32 expected_ptr; __netif_tx_lock(nq, smp_processor_id()); if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index)) goto out; hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index)); expected_ptr = (u32)txq->tx_desc_dma + txq->tx_curr_desc * sizeof(struct tx_desc); if (hw_desc_ptr != expected_ptr) txq_enable(txq); out: __netif_tx_unlock(nq); mp->work_tx_end &= ~(1 << txq->index); } static int txq_reclaim(struct tx_queue *txq, int budget, int force) { struct mv643xx_eth_private *mp = txq_to_mp(txq); struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); int reclaimed; __netif_tx_lock_bh(nq); reclaimed = 0; while (reclaimed < budget && txq->tx_desc_count > 0) { int tx_index; struct tx_desc *desc; u32 cmd_sts; char 
desc_dma_map; tx_index = txq->tx_used_desc; desc = &txq->tx_desc_area[tx_index]; desc_dma_map = txq->tx_desc_mapping[tx_index]; cmd_sts = desc->cmd_sts; if (cmd_sts & BUFFER_OWNED_BY_DMA) { if (!force) break; desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA; } txq->tx_used_desc = tx_index + 1; if (txq->tx_used_desc == txq->tx_ring_size) txq->tx_used_desc = 0; reclaimed++; txq->tx_desc_count--; if (!IS_TSO_HEADER(txq, desc->buf_ptr)) { if (desc_dma_map == DESC_DMA_MAP_PAGE) dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr, desc->byte_cnt, DMA_TO_DEVICE); else dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, desc->byte_cnt, DMA_TO_DEVICE); } if (cmd_sts & TX_ENABLE_INTERRUPT) { struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); if (!WARN_ON(!skb)) dev_consume_skb_any(skb); } if (cmd_sts & ERROR_SUMMARY) { netdev_info(mp->dev, "tx error\n"); mp->dev->stats.tx_errors++; } } __netif_tx_unlock_bh(nq); if (reclaimed < budget) mp->work_tx &= ~(1 << txq->index); return reclaimed; } /* tx rate control **********************************************************/ /* * Set total maximum TX rate (shared by all TX queues for this port) * to 'rate' bits per second, with a maximum burst of 'burst' bytes. */ static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst) { int token_rate; int mtu; int bucket_size; token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000); if (token_rate > 1023) token_rate = 1023; mtu = (mp->dev->mtu + 255) >> 8; if (mtu > 63) mtu = 63; bucket_size = (burst + 255) >> 8; if (bucket_size > 65535) bucket_size = 65535; switch (mp->shared->tx_bw_control) { case TX_BW_CONTROL_OLD_LAYOUT: wrlp(mp, TX_BW_RATE, token_rate); wrlp(mp, TX_BW_MTU, mtu); wrlp(mp, TX_BW_BURST, bucket_size); break; case TX_BW_CONTROL_NEW_LAYOUT: wrlp(mp, TX_BW_RATE_MOVED, token_rate); wrlp(mp, TX_BW_MTU_MOVED, mtu); wrlp(mp, TX_BW_BURST_MOVED, bucket_size); break; } } static void txq_set_rate(struct tx_queue *txq, int rate, int burst) { struct mv643xx_eth_private *mp = txq_to_mp(txq); int token_rate; int bucket_size; token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000); if (token_rate > 1023) token_rate = 1023; bucket_size = (burst + 255) >> 8; if (bucket_size > 65535) bucket_size = 65535; wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14); wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate); } static void txq_set_fixed_prio_mode(struct tx_queue *txq) { struct mv643xx_eth_private *mp = txq_to_mp(txq); int off; u32 val; /* * Turn on fixed priority mode. 
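 * The register offset depends on whether this controller uses the old or the relocated (new) TX bandwidth-control register layout.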
*/ off = 0; switch (mp->shared->tx_bw_control) { case TX_BW_CONTROL_OLD_LAYOUT: off = TXQ_FIX_PRIO_CONF; break; case TX_BW_CONTROL_NEW_LAYOUT: off = TXQ_FIX_PRIO_CONF_MOVED; break; } if (off) { val = rdlp(mp, off); val |= 1 << txq->index; wrlp(mp, off, val); } } /* mii management interface *************************************************/ static void mv643xx_eth_adjust_link(struct net_device *dev) { struct mv643xx_eth_private *mp = netdev_priv(dev); u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL); u32 autoneg_disable = FORCE_LINK_PASS | DISABLE_AUTO_NEG_SPEED_GMII | DISABLE_AUTO_NEG_FOR_FLOW_CTRL | DISABLE_AUTO_NEG_FOR_DUPLEX; if (dev->phydev->autoneg == AUTONEG_ENABLE) { /* enable auto negotiation */ pscr &= ~autoneg_disable; goto out_write; } pscr |= autoneg_disable; if (dev->phydev->speed == SPEED_1000) { /* force gigabit, half duplex not supported */ pscr |= SET_GMII_SPEED_TO_1000; pscr |= SET_FULL_DUPLEX_MODE; goto out_write; } pscr &= ~SET_GMII_SPEED_TO_1000; if (dev->phydev->speed == SPEED_100) pscr |= SET_MII_SPEED_TO_100; else pscr &= ~SET_MII_SPEED_TO_100; if (dev->phydev->duplex == DUPLEX_FULL) pscr |= SET_FULL_DUPLEX_MODE; else pscr &= ~SET_FULL_DUPLEX_MODE; out_write: wrlp(mp, PORT_SERIAL_CONTROL, pscr); } /* statistics ***************************************************************/ static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev) { struct mv643xx_eth_private *mp = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; unsigned long tx_packets = 0; unsigned long tx_bytes = 0; unsigned long tx_dropped = 0; int i; for (i = 0; i < mp->txq_count; i++) { struct tx_queue *txq = mp->txq + i; tx_packets += txq->tx_packets; tx_bytes += txq->tx_bytes; tx_dropped += txq->tx_dropped; } stats->tx_packets = tx_packets; stats->tx_bytes = tx_bytes; stats->tx_dropped = tx_dropped; return stats; } static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset) { return rdl(mp, MIB_COUNTERS(mp->port_num) + offset); } static void mib_counters_clear(struct mv643xx_eth_private *mp) { int i; for (i = 0; i < 0x80; i += 4) mib_read(mp, i); /* Clear non MIB hw counters also */ rdlp(mp, RX_DISCARD_FRAME_CNT); rdlp(mp, RX_OVERRUN_FRAME_CNT); } static void mib_counters_update(struct mv643xx_eth_private *mp) { struct mib_counters *p = &mp->mib_counters; spin_lock_bh(&mp->mib_counters_lock); p->good_octets_received += mib_read(mp, 0x00); p->bad_octets_received += mib_read(mp, 0x08); p->internal_mac_transmit_err += mib_read(mp, 0x0c); p->good_frames_received += mib_read(mp, 0x10); p->bad_frames_received += mib_read(mp, 0x14); p->broadcast_frames_received += mib_read(mp, 0x18); p->multicast_frames_received += mib_read(mp, 0x1c); p->frames_64_octets += mib_read(mp, 0x20); p->frames_65_to_127_octets += mib_read(mp, 0x24); p->frames_128_to_255_octets += mib_read(mp, 0x28); p->frames_256_to_511_octets += mib_read(mp, 0x2c); p->frames_512_to_1023_octets += mib_read(mp, 0x30); p->frames_1024_to_max_octets += mib_read(mp, 0x34); p->good_octets_sent += mib_read(mp, 0x38); p->good_frames_sent += mib_read(mp, 0x40); p->excessive_collision += mib_read(mp, 0x44); p->multicast_frames_sent += mib_read(mp, 0x48); p->broadcast_frames_sent += mib_read(mp, 0x4c); p->unrec_mac_control_received += mib_read(mp, 0x50); p->fc_sent += mib_read(mp, 0x54); p->good_fc_received += mib_read(mp, 0x58); p->bad_fc_received += mib_read(mp, 0x5c); p->undersize_received += mib_read(mp, 0x60); p->fragments_received += mib_read(mp, 0x64); p->oversize_received += mib_read(mp, 0x68); p->jabber_received += 
mib_read(mp, 0x6c); p->mac_receive_error += mib_read(mp, 0x70); p->bad_crc_event += mib_read(mp, 0x74); p->collision += mib_read(mp, 0x78); p->late_collision += mib_read(mp, 0x7c); /* Non MIB hardware counters */ p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT); p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT); spin_unlock_bh(&mp->mib_counters_lock); } static void mib_counters_timer_wrapper(struct timer_list *t) { struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer); mib_counters_update(mp); mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); } /* interrupt coalescing *****************************************************/ /* * Hardware coalescing parameters are set in units of 64 t_clk * cycles. I.e.: * * coal_delay_in_usec = 64000000 * register_value / t_clk_rate * * register_value = coal_delay_in_usec * t_clk_rate / 64000000 * * In the ->set*() methods, we round the computed register value * to the nearest integer. */ static unsigned int get_rx_coal(struct mv643xx_eth_private *mp) { u32 val = rdlp(mp, SDMA_CONFIG); u64 temp; if (mp->shared->extended_rx_coal_limit) temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7); else temp = (val & 0x003fff00) >> 8; temp *= 64000000; temp += mp->t_clk / 2; do_div(temp, mp->t_clk); return (unsigned int)temp; } static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec) { u64 temp; u32 val; temp = (u64)usec * mp->t_clk; temp += 31999999; do_div(temp, 64000000); val = rdlp(mp, SDMA_CONFIG); if (mp->shared->extended_rx_coal_limit) { if (temp > 0xffff) temp = 0xffff; val &= ~0x023fff80; val |= (temp & 0x8000) << 10; val |= (temp & 0x7fff) << 7; } else { if (temp > 0x3fff) temp = 0x3fff; val &= ~0x003fff00; val |= (temp & 0x3fff) << 8; } wrlp(mp, SDMA_CONFIG, val); } static unsigned int get_tx_coal(struct mv643xx_eth_private *mp) { u64 temp; temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; temp *= 64000000; temp += mp->t_clk / 2; do_div(temp, mp->t_clk); return (unsigned int)temp; } static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec) { u64 temp; temp = (u64)usec * mp->t_clk; temp += 31999999; do_div(temp, 64000000); if (temp > 0x3fff) temp = 0x3fff; wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4); } /* ethtool ******************************************************************/ struct mv643xx_eth_stats { char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; int netdev_off; int mp_off; }; #define SSTAT(m) \ { #m, sizeof_field(struct net_device_stats, m), \ offsetof(struct net_device, stats.m), -1 } #define MIBSTAT(m) \ { #m, sizeof_field(struct mib_counters, m), \ -1, offsetof(struct mv643xx_eth_private, mib_counters.m) } static const struct mv643xx_eth_stats mv643xx_eth_stats[] = { SSTAT(rx_packets), SSTAT(tx_packets), SSTAT(rx_bytes), SSTAT(tx_bytes), SSTAT(rx_errors), SSTAT(tx_errors), SSTAT(rx_dropped), SSTAT(tx_dropped), MIBSTAT(good_octets_received), MIBSTAT(bad_octets_received), MIBSTAT(internal_mac_transmit_err), MIBSTAT(good_frames_received), MIBSTAT(bad_frames_received), MIBSTAT(broadcast_frames_received), MIBSTAT(multicast_frames_received), MIBSTAT(frames_64_octets), MIBSTAT(frames_65_to_127_octets), MIBSTAT(frames_128_to_255_octets), MIBSTAT(frames_256_to_511_octets), MIBSTAT(frames_512_to_1023_octets), MIBSTAT(frames_1024_to_max_octets), MIBSTAT(good_octets_sent), MIBSTAT(good_frames_sent), MIBSTAT(excessive_collision), MIBSTAT(multicast_frames_sent), MIBSTAT(broadcast_frames_sent), MIBSTAT(unrec_mac_control_received), MIBSTAT(fc_sent), MIBSTAT(good_fc_received), 
MIBSTAT(bad_fc_received), MIBSTAT(undersize_received), MIBSTAT(fragments_received), MIBSTAT(oversize_received), MIBSTAT(jabber_received), MIBSTAT(mac_receive_error), MIBSTAT(bad_crc_event), MIBSTAT(collision), MIBSTAT(late_collision), MIBSTAT(rx_discard), MIBSTAT(rx_overrun), }; static int mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp, struct ethtool_link_ksettings *cmd) { struct net_device *dev = mp->dev; phy_ethtool_ksettings_get(dev->phydev, cmd); /* * The MAC does not support 1000baseT_Half. */ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, cmd->link_modes.supported); linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, cmd->link_modes.advertising); return 0; } static int mv643xx_eth_get_link_ksettings_phyless(struct mv643xx_eth_private *mp, struct ethtool_link_ksettings *cmd) { u32 port_status; u32 supported, advertising; port_status = rdlp(mp, PORT_STATUS); supported = SUPPORTED_MII; advertising = ADVERTISED_MII; switch (port_status & PORT_SPEED_MASK) { case PORT_SPEED_10: cmd->base.speed = SPEED_10; break; case PORT_SPEED_100: cmd->base.speed = SPEED_100; break; case PORT_SPEED_1000: cmd->base.speed = SPEED_1000; break; default: cmd->base.speed = -1; break; } cmd->base.duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF; cmd->base.port = PORT_MII; cmd->base.phy_address = 0; cmd->base.autoneg = AUTONEG_DISABLE; ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); return 0; } static void mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { wol->supported = 0; wol->wolopts = 0; if (dev->phydev) phy_ethtool_get_wol(dev->phydev, wol); } static int mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { int err; if (!dev->phydev) return -EOPNOTSUPP; err = phy_ethtool_set_wol(dev->phydev, wol); /* Given that mv643xx_eth works without the marvell-specific PHY driver, * this debugging hint is useful to have. */ if (err == -EOPNOTSUPP) netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n"); return err; } static int mv643xx_eth_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct mv643xx_eth_private *mp = netdev_priv(dev); if (dev->phydev) return mv643xx_eth_get_link_ksettings_phy(mp, cmd); else return mv643xx_eth_get_link_ksettings_phyless(mp, cmd); } static int mv643xx_eth_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct ethtool_link_ksettings c = *cmd; u32 advertising; int ret; if (!dev->phydev) return -EINVAL; /* * The MAC does not support 1000baseT_Half. 
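 * Strip it from the advertised modes before passing the request on to phylib.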
*/ ethtool_convert_link_mode_to_legacy_u32(&advertising, c.link_modes.advertising); advertising &= ~ADVERTISED_1000baseT_Half; ethtool_convert_legacy_u32_to_link_mode(c.link_modes.advertising, advertising); ret = phy_ethtool_ksettings_set(dev->phydev, &c); if (!ret) mv643xx_eth_adjust_link(dev); return ret; } static void mv643xx_eth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { strscpy(drvinfo->driver, mv643xx_eth_driver_name, sizeof(drvinfo->driver)); strscpy(drvinfo->version, mv643xx_eth_driver_version, sizeof(drvinfo->version)); strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info)); } static int mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct mv643xx_eth_private *mp = netdev_priv(dev); ec->rx_coalesce_usecs = get_rx_coal(mp); ec->tx_coalesce_usecs = get_tx_coal(mp); return 0; } static int mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct mv643xx_eth_private *mp = netdev_priv(dev); set_rx_coal(mp, ec->rx_coalesce_usecs); set_tx_coal(mp, ec->tx_coalesce_usecs); return 0; } static void mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er, struct kernel_ethtool_ringparam *kernel_er, struct netlink_ext_ack *extack) { struct mv643xx_eth_private *mp = netdev_priv(dev); er->rx_max_pending = 4096; er->tx_max_pending = 4096; er->rx_pending = mp->rx_ring_size; er->tx_pending = mp->tx_ring_size; } static int mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er, struct kernel_ethtool_ringparam *kernel_er, struct netlink_ext_ack *extack) { struct mv643xx_eth_private *mp = netdev_priv(dev); if (er->rx_mini_pending || er->rx_jumbo_pending) return -EINVAL; mp->rx_ring_size = min(er->rx_pending, 4096U); mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending, MV643XX_MAX_SKB_DESCS * 2, 4096); if (mp->tx_ring_size != er->tx_pending) netdev_warn(dev, "TX queue size set to %u (requested %u)\n", mp->tx_ring_size, er->tx_pending); if (netif_running(dev)) { mv643xx_eth_stop(dev); if (mv643xx_eth_open(dev)) { netdev_err(dev, "fatal error on re-opening device after ring param change\n"); return -ENOMEM; } } return 0; } static int mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features) { struct mv643xx_eth_private *mp = netdev_priv(dev); bool rx_csum = features & NETIF_F_RXCSUM; wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000); return 0; } static void mv643xx_eth_get_strings(struct net_device *dev, uint32_t stringset, uint8_t *data) { int i; if (stringset == ETH_SS_STATS) { for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { memcpy(data + i * ETH_GSTRING_LEN, mv643xx_eth_stats[i].stat_string, ETH_GSTRING_LEN); } } } static void mv643xx_eth_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, uint64_t *data) { struct mv643xx_eth_private *mp = netdev_priv(dev); int i; mv643xx_eth_get_stats(dev); mib_counters_update(mp); for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { const struct mv643xx_eth_stats *stat; void *p; stat = mv643xx_eth_stats + i; if (stat->netdev_off >= 0) p = ((void *)mp->dev) + stat->netdev_off; else p = ((void *)mp) + stat->mp_off; data[i] = (stat->sizeof_stat == 8) ? 
*(uint64_t *)p : *(uint32_t *)p; } } static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset) { if (sset == ETH_SS_STATS) return ARRAY_SIZE(mv643xx_eth_stats); return -EOPNOTSUPP; } static const struct ethtool_ops mv643xx_eth_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS, .get_drvinfo = mv643xx_eth_get_drvinfo, .nway_reset = phy_ethtool_nway_reset, .get_link = ethtool_op_get_link, .get_coalesce = mv643xx_eth_get_coalesce, .set_coalesce = mv643xx_eth_set_coalesce, .get_ringparam = mv643xx_eth_get_ringparam, .set_ringparam = mv643xx_eth_set_ringparam, .get_strings = mv643xx_eth_get_strings, .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, .get_sset_count = mv643xx_eth_get_sset_count, .get_ts_info = ethtool_op_get_ts_info, .get_wol = mv643xx_eth_get_wol, .set_wol = mv643xx_eth_set_wol, .get_link_ksettings = mv643xx_eth_get_link_ksettings, .set_link_ksettings = mv643xx_eth_set_link_ksettings, }; /* address handling *********************************************************/ static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr) { unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH); unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW); addr[0] = (mac_h >> 24) & 0xff; addr[1] = (mac_h >> 16) & 0xff; addr[2] = (mac_h >> 8) & 0xff; addr[3] = mac_h & 0xff; addr[4] = (mac_l >> 8) & 0xff; addr[5] = mac_l & 0xff; } static void uc_addr_set(struct mv643xx_eth_private *mp, const u8 *addr) { wrlp(mp, MAC_ADDR_HIGH, (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]); wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]); } static u32 uc_addr_filter_mask(struct net_device *dev) { struct netdev_hw_addr *ha; u32 nibbles; if (dev->flags & IFF_PROMISC) return 0; nibbles = 1 << (dev->dev_addr[5] & 0x0f); netdev_for_each_uc_addr(ha, dev) { if (memcmp(dev->dev_addr, ha->addr, 5)) return 0; if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0) return 0; nibbles |= 1 << (ha->addr[5] & 0x0f); } return nibbles; } static void mv643xx_eth_program_unicast_filter(struct net_device *dev) { struct mv643xx_eth_private *mp = netdev_priv(dev); u32 port_config; u32 nibbles; int i; uc_addr_set(mp, dev->dev_addr); port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE; nibbles = uc_addr_filter_mask(dev); if (!nibbles) { port_config |= UNICAST_PROMISCUOUS_MODE; nibbles = 0xffff; } for (i = 0; i < 16; i += 4) { int off = UNICAST_TABLE(mp->port_num) + i; u32 v; v = 0; if (nibbles & 1) v |= 0x00000001; if (nibbles & 2) v |= 0x00000100; if (nibbles & 4) v |= 0x00010000; if (nibbles & 8) v |= 0x01000000; nibbles >>= 4; wrl(mp, off, v); } wrlp(mp, PORT_CONFIG, port_config); } static int addr_crc(unsigned char *addr) { int crc = 0; int i; for (i = 0; i < 6; i++) { int j; crc = (crc ^ addr[i]) << 8; for (j = 7; j >= 0; j--) { if (crc & (0x100 << j)) crc ^= 0x107 << j; } } return crc; } static void mv643xx_eth_program_multicast_filter(struct net_device *dev) { struct mv643xx_eth_private *mp = netdev_priv(dev); u32 *mc_spec; u32 *mc_other; struct netdev_hw_addr *ha; int i; if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) goto promiscuous; /* Allocate both mc_spec and mc_other tables */ mc_spec = kcalloc(128, sizeof(u32), GFP_ATOMIC); if (!mc_spec) goto promiscuous; mc_other = &mc_spec[64]; netdev_for_each_mc_addr(ha, dev) { u8 *a = ha->addr; u32 *table; u8 entry; if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) { table = mc_spec; entry = a[5]; } else { table = mc_other; entry = addr_crc(a); } table[entry >> 2] |= 1 << (8 * (entry & 3)); } for (i = 0; i < 64; i++) { wrl(mp, 
SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32), mc_spec[i]); wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32), mc_other[i]); } kfree(mc_spec); return; promiscuous: for (i = 0; i < 64; i++) { wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32), 0x01010101u); wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32), 0x01010101u); } } static void mv643xx_eth_set_rx_mode(struct net_device *dev) { mv643xx_eth_program_unicast_filter(dev); mv643xx_eth_program_multicast_filter(dev); } static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *sa = addr; if (!is_valid_ether_addr(sa->sa_data)) return -EADDRNOTAVAIL; eth_hw_addr_set(dev, sa->sa_data); netif_addr_lock_bh(dev); mv643xx_eth_program_unicast_filter(dev); netif_addr_unlock_bh(dev); return 0; } /* rx/tx queue initialisation ***********************************************/ static int rxq_init(struct mv643xx_eth_private *mp, int index) { struct rx_queue *rxq = mp->rxq + index; struct rx_desc *rx_desc; int size; int i; rxq->index = index; rxq->rx_ring_size = mp->rx_ring_size; rxq->rx_desc_count = 0; rxq->rx_curr_desc = 0; rxq->rx_used_desc = 0; size = rxq->rx_ring_size * sizeof(struct rx_desc); if (index == 0 && size <= mp->rx_desc_sram_size) { rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr, mp->rx_desc_sram_size); rxq->rx_desc_dma = mp->rx_desc_sram_addr; } else { rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, size, &rxq->rx_desc_dma, GFP_KERNEL); } if (rxq->rx_desc_area == NULL) { netdev_err(mp->dev, "can't allocate rx ring (%d bytes)\n", size); goto out; } memset(rxq->rx_desc_area, 0, size); rxq->rx_desc_area_size = size; rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb), GFP_KERNEL); if (rxq->rx_skb == NULL) goto out_free; rx_desc = rxq->rx_desc_area; for (i = 0; i < rxq->rx_ring_size; i++) { int nexti; nexti = i + 1; if (nexti == rxq->rx_ring_size) nexti = 0; rx_desc[i].next_desc_ptr = rxq->rx_desc_dma + nexti * sizeof(struct rx_desc); } return 0; out_free: if (index == 0 && size <= mp->rx_desc_sram_size) iounmap(rxq->rx_desc_area); else dma_free_coherent(mp->dev->dev.parent, size, rxq->rx_desc_area, rxq->rx_desc_dma); out: return -ENOMEM; } static void rxq_deinit(struct rx_queue *rxq) { struct mv643xx_eth_private *mp = rxq_to_mp(rxq); int i; rxq_disable(rxq); for (i = 0; i < rxq->rx_ring_size; i++) { if (rxq->rx_skb[i]) { dev_consume_skb_any(rxq->rx_skb[i]); rxq->rx_desc_count--; } } if (rxq->rx_desc_count) { netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n", rxq->rx_desc_count); } if (rxq->index == 0 && rxq->rx_desc_area_size <= mp->rx_desc_sram_size) iounmap(rxq->rx_desc_area); else dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size, rxq->rx_desc_area, rxq->rx_desc_dma); kfree(rxq->rx_skb); } static int txq_init(struct mv643xx_eth_private *mp, int index) { struct tx_queue *txq = mp->txq + index; struct tx_desc *tx_desc; int size; int ret; int i; txq->index = index; txq->tx_ring_size = mp->tx_ring_size; /* A queue must always have room for at least one skb. * Therefore, stop the queue when the free entries reaches * the maximum number of descriptors per skb. 
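 * The queue is woken again once the number of in-flight descriptors drops to half of this stop threshold.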
*/ txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS; txq->tx_wake_threshold = txq->tx_stop_threshold / 2; txq->tx_desc_count = 0; txq->tx_curr_desc = 0; txq->tx_used_desc = 0; size = txq->tx_ring_size * sizeof(struct tx_desc); if (index == 0 && size <= mp->tx_desc_sram_size) { txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr, mp->tx_desc_sram_size); txq->tx_desc_dma = mp->tx_desc_sram_addr; } else { txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, size, &txq->tx_desc_dma, GFP_KERNEL); } if (txq->tx_desc_area == NULL) { netdev_err(mp->dev, "can't allocate tx ring (%d bytes)\n", size); return -ENOMEM; } memset(txq->tx_desc_area, 0, size); txq->tx_desc_area_size = size; tx_desc = txq->tx_desc_area; for (i = 0; i < txq->tx_ring_size; i++) { struct tx_desc *txd = tx_desc + i; int nexti; nexti = i + 1; if (nexti == txq->tx_ring_size) nexti = 0; txd->cmd_sts = 0; txd->next_desc_ptr = txq->tx_desc_dma + nexti * sizeof(struct tx_desc); } txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char), GFP_KERNEL); if (!txq->tx_desc_mapping) { ret = -ENOMEM; goto err_free_desc_area; } /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent, txq->tx_ring_size * TSO_HEADER_SIZE, &txq->tso_hdrs_dma, GFP_KERNEL); if (txq->tso_hdrs == NULL) { ret = -ENOMEM; goto err_free_desc_mapping; } skb_queue_head_init(&txq->tx_skb); return 0; err_free_desc_mapping: kfree(txq->tx_desc_mapping); err_free_desc_area: if (index == 0 && size <= mp->tx_desc_sram_size) iounmap(txq->tx_desc_area); else dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, txq->tx_desc_area, txq->tx_desc_dma); return ret; } static void txq_deinit(struct tx_queue *txq) { struct mv643xx_eth_private *mp = txq_to_mp(txq); txq_disable(txq); txq_reclaim(txq, txq->tx_ring_size, 1); BUG_ON(txq->tx_used_desc != txq->tx_curr_desc); if (txq->index == 0 && txq->tx_desc_area_size <= mp->tx_desc_sram_size) iounmap(txq->tx_desc_area); else dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, txq->tx_desc_area, txq->tx_desc_dma); kfree(txq->tx_desc_mapping); if (txq->tso_hdrs) dma_free_coherent(mp->dev->dev.parent, txq->tx_ring_size * TSO_HEADER_SIZE, txq->tso_hdrs, txq->tso_hdrs_dma); } /* netdev ops and related ***************************************************/ static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp) { u32 int_cause; u32 int_cause_ext; int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask; if (int_cause == 0) return 0; int_cause_ext = 0; if (int_cause & INT_EXT) { int_cause &= ~INT_EXT; int_cause_ext = rdlp(mp, INT_CAUSE_EXT); } if (int_cause) { wrlp(mp, INT_CAUSE, ~int_cause); mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) & ~(rdlp(mp, TXQ_COMMAND) & 0xff); mp->work_rx |= (int_cause & INT_RX) >> 2; } int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX; if (int_cause_ext) { wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext); if (int_cause_ext & INT_EXT_LINK_PHY) mp->work_link = 1; mp->work_tx |= int_cause_ext & INT_EXT_TX; } return 1; } static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct mv643xx_eth_private *mp = netdev_priv(dev); if (unlikely(!mv643xx_eth_collect_events(mp))) return IRQ_NONE; wrlp(mp, INT_MASK, 0); napi_schedule(&mp->napi); return IRQ_HANDLED; } static void handle_link_event(struct mv643xx_eth_private *mp) { struct net_device *dev = mp->dev; u32 port_status; int speed; int duplex; int fc; port_status = rdlp(mp, PORT_STATUS); if (!(port_status & LINK_UP)) { 
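/* Link is down: if the carrier was previously up, report it, reclaim all in-flight TX descriptors and reset the queue pointers. */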
if (netif_carrier_ok(dev)) { int i; netdev_info(dev, "link down\n"); netif_carrier_off(dev); for (i = 0; i < mp->txq_count; i++) { struct tx_queue *txq = mp->txq + i; txq_reclaim(txq, txq->tx_ring_size, 1); txq_reset_hw_ptr(txq); } } return; } switch (port_status & PORT_SPEED_MASK) { case PORT_SPEED_10: speed = 10; break; case PORT_SPEED_100: speed = 100; break; case PORT_SPEED_1000: speed = 1000; break; default: speed = -1; break; } duplex = (port_status & FULL_DUPLEX) ? 1 : 0; fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n", speed, duplex ? "full" : "half", fc ? "en" : "dis"); if (!netif_carrier_ok(dev)) netif_carrier_on(dev); } static int mv643xx_eth_poll(struct napi_struct *napi, int budget) { struct mv643xx_eth_private *mp; int work_done; mp = container_of(napi, struct mv643xx_eth_private, napi); if (unlikely(mp->oom)) { mp->oom = 0; del_timer(&mp->rx_oom); } work_done = 0; while (work_done < budget) { u8 queue_mask; int queue; int work_tbd; if (mp->work_link) { mp->work_link = 0; handle_link_event(mp); work_done++; continue; } queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx; if (likely(!mp->oom)) queue_mask |= mp->work_rx_refill; if (!queue_mask) { if (mv643xx_eth_collect_events(mp)) continue; break; } queue = fls(queue_mask) - 1; queue_mask = 1 << queue; work_tbd = budget - work_done; if (work_tbd > 16) work_tbd = 16; if (mp->work_tx_end & queue_mask) { txq_kick(mp->txq + queue); } else if (mp->work_tx & queue_mask) { work_done += txq_reclaim(mp->txq + queue, work_tbd, 0); txq_maybe_wake(mp->txq + queue); } else if (mp->work_rx & queue_mask) { work_done += rxq_process(mp->rxq + queue, work_tbd); } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) { work_done += rxq_refill(mp->rxq + queue, work_tbd); } else { BUG(); } } if (work_done < budget) { if (mp->oom) mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); napi_complete_done(napi, work_done); wrlp(mp, INT_MASK, mp->int_mask); } return work_done; } static inline void oom_timer_wrapper(struct timer_list *t) { struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom); napi_schedule(&mp->napi); } static void port_start(struct mv643xx_eth_private *mp) { struct net_device *dev = mp->dev; u32 pscr; int i; /* * Perform PHY reset, if there is a PHY. */ if (dev->phydev) { struct ethtool_link_ksettings cmd; mv643xx_eth_get_link_ksettings(dev, &cmd); phy_init_hw(dev->phydev); mv643xx_eth_set_link_ksettings( dev, (const struct ethtool_link_ksettings *)&cmd); phy_start(dev->phydev); } /* * Configure basic link parameters. */ pscr = rdlp(mp, PORT_SERIAL_CONTROL); pscr |= SERIAL_PORT_ENABLE; wrlp(mp, PORT_SERIAL_CONTROL, pscr); pscr |= DO_NOT_FORCE_LINK_FAIL; if (!dev->phydev) pscr |= FORCE_LINK_PASS; wrlp(mp, PORT_SERIAL_CONTROL, pscr); /* * Configure TX path and queues. */ tx_set_rate(mp, 1000000000, 16777216); for (i = 0; i < mp->txq_count; i++) { struct tx_queue *txq = mp->txq + i; txq_reset_hw_ptr(txq); txq_set_rate(txq, 1000000000, 16777216); txq_set_fixed_prio_mode(txq); } /* * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast * frames to RX queue #0, and include the pseudo-header when * calculating receive checksums. */ mv643xx_eth_set_features(mp->dev, mp->dev->features); /* * Treat BPDUs as normal multicasts, and disable partition mode. */ wrlp(mp, PORT_CONFIG_EXT, 0x00000000); /* * Add configured unicast addresses to address filter table. */ mv643xx_eth_program_unicast_filter(mp->dev); /* * Enable the receive queues. 
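 * Each queue's current-descriptor pointer is programmed first so the DMA engine resumes at the correct position in the ring.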
*/ for (i = 0; i < mp->rxq_count; i++) { struct rx_queue *rxq = mp->rxq + i; u32 addr; addr = (u32)rxq->rx_desc_dma; addr += rxq->rx_curr_desc * sizeof(struct rx_desc); wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr); rxq_enable(rxq); } } static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp) { int skb_size; /* * Reserve 2+14 bytes for an ethernet header (the hardware * automatically prepends 2 bytes of dummy data to each * received packet), 16 bytes for up to four VLAN tags, and * 4 bytes for the trailing FCS -- 36 bytes total. */ skb_size = mp->dev->mtu + 36; /* * Make sure that the skb size is a multiple of 8 bytes, as * the lower three bits of the receive descriptor's buffer * size field are ignored by the hardware. */ mp->skb_size = (skb_size + 7) & ~7; /* * If NET_SKB_PAD is smaller than a cache line, * netdev_alloc_skb() will cause skb->data to be misaligned * to a cache line boundary. If this is the case, include * some extra space to allow re-aligning the data area. */ mp->skb_size += SKB_DMA_REALIGN; } static int mv643xx_eth_open(struct net_device *dev) { struct mv643xx_eth_private *mp = netdev_priv(dev); int err; int i; wrlp(mp, INT_CAUSE, 0); wrlp(mp, INT_CAUSE_EXT, 0); rdlp(mp, INT_CAUSE_EXT); err = request_irq(dev->irq, mv643xx_eth_irq, IRQF_SHARED, dev->name, dev); if (err) { netdev_err(dev, "can't assign irq\n"); return -EAGAIN; } mv643xx_eth_recalc_skb_size(mp); napi_enable(&mp->napi); mp->int_mask = INT_EXT; for (i = 0; i < mp->rxq_count; i++) { err = rxq_init(mp, i); if (err) { while (--i >= 0) rxq_deinit(mp->rxq + i); goto out; } rxq_refill(mp->rxq + i, INT_MAX); mp->int_mask |= INT_RX_0 << i; } if (mp->oom) { mp->rx_oom.expires = jiffies + (HZ / 10); add_timer(&mp->rx_oom); } for (i = 0; i < mp->txq_count; i++) { err = txq_init(mp, i); if (err) { while (--i >= 0) txq_deinit(mp->txq + i); goto out_free; } mp->int_mask |= INT_TX_END_0 << i; } add_timer(&mp->mib_counters_timer); port_start(mp); wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); wrlp(mp, INT_MASK, mp->int_mask); return 0; out_free: for (i = 0; i < mp->rxq_count; i++) rxq_deinit(mp->rxq + i); out: napi_disable(&mp->napi); free_irq(dev->irq, dev); return err; } static void port_reset(struct mv643xx_eth_private *mp) { unsigned int data; int i; for (i = 0; i < mp->rxq_count; i++) rxq_disable(mp->rxq + i); for (i = 0; i < mp->txq_count; i++) txq_disable(mp->txq + i); while (1) { u32 ps = rdlp(mp, PORT_STATUS); if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY) break; udelay(10); } /* Reset the Enable bit in the Configuration Register */ data = rdlp(mp, PORT_SERIAL_CONTROL); data &= ~(SERIAL_PORT_ENABLE | DO_NOT_FORCE_LINK_FAIL | FORCE_LINK_PASS); wrlp(mp, PORT_SERIAL_CONTROL, data); } static int mv643xx_eth_stop(struct net_device *dev) { struct mv643xx_eth_private *mp = netdev_priv(dev); int i; wrlp(mp, INT_MASK_EXT, 0x00000000); wrlp(mp, INT_MASK, 0x00000000); rdlp(mp, INT_MASK); napi_disable(&mp->napi); del_timer_sync(&mp->rx_oom); netif_carrier_off(dev); if (dev->phydev) phy_stop(dev->phydev); free_irq(dev->irq, dev); port_reset(mp); mv643xx_eth_get_stats(dev); mib_counters_update(mp); del_timer_sync(&mp->mib_counters_timer); for (i = 0; i < mp->rxq_count; i++) rxq_deinit(mp->rxq + i); for (i = 0; i < mp->txq_count; i++) txq_deinit(mp->txq + i); return 0; } static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { int ret; if (!dev->phydev) return -ENOTSUPP; ret = phy_mii_ioctl(dev->phydev, ifr, cmd); if (!ret) mv643xx_eth_adjust_link(dev); return ret; } 
static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) { struct mv643xx_eth_private *mp = netdev_priv(dev); dev->mtu = new_mtu; mv643xx_eth_recalc_skb_size(mp); tx_set_rate(mp, 1000000000, 16777216); if (!netif_running(dev)) return 0; /* * Stop and then re-open the interface. This will allocate RX * skbs of the new MTU. * There is a possible danger that the open will not succeed, * due to memory being full. */ mv643xx_eth_stop(dev); if (mv643xx_eth_open(dev)) { netdev_err(dev, "fatal error on re-opening device after MTU change\n"); } return 0; } static void tx_timeout_task(struct work_struct *ugly) { struct mv643xx_eth_private *mp; mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task); if (netif_running(mp->dev)) { netif_tx_stop_all_queues(mp->dev); port_reset(mp); port_start(mp); netif_tx_wake_all_queues(mp->dev); } } static void mv643xx_eth_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct mv643xx_eth_private *mp = netdev_priv(dev); netdev_info(dev, "tx timeout\n"); schedule_work(&mp->tx_timeout_task); } #ifdef CONFIG_NET_POLL_CONTROLLER static void mv643xx_eth_netpoll(struct net_device *dev) { struct mv643xx_eth_private *mp = netdev_priv(dev); wrlp(mp, INT_MASK, 0x00000000); rdlp(mp, INT_MASK); mv643xx_eth_irq(dev->irq, dev); wrlp(mp, INT_MASK, mp->int_mask); } #endif /* platform glue ************************************************************/ static void mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp, const struct mbus_dram_target_info *dram) { void __iomem *base = msp->base; u32 win_enable; u32 win_protect; int i; for (i = 0; i < 6; i++) { writel(0, base + WINDOW_BASE(i)); writel(0, base + WINDOW_SIZE(i)); if (i < 4) writel(0, base + WINDOW_REMAP_HIGH(i)); } win_enable = 0x3f; win_protect = 0; for (i = 0; i < dram->num_cs; i++) { const struct mbus_dram_window *cs = dram->cs + i; writel((cs->base & 0xffff0000) | (cs->mbus_attr << 8) | dram->mbus_dram_target_id, base + WINDOW_BASE(i)); writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); win_enable &= ~(1 << i); win_protect |= 3 << (2 * i); } writel(win_enable, base + WINDOW_BAR_ENABLE); msp->win_protect = win_protect; } static void infer_hw_params(struct mv643xx_eth_shared_private *msp) { /* * Check whether we have a 14-bit coal limit field in bits * [21:8], or a 16-bit coal limit in bits [25,21:7] of the * SDMA config register. */ writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG); if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000) msp->extended_rx_coal_limit = 1; else msp->extended_rx_coal_limit = 0; /* * Check whether the MAC supports TX rate control, and if * yes, whether its associated registers are in the old or * the new place. 
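 * A test write to TX_BW_MTU_MOVED that reads back indicates the new layout; failing that, a test write to TX_BW_RATE distinguishes the old layout from no TX rate control at all.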
*/ writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED); if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) { msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT; } else { writel(7, msp->base + 0x0400 + TX_BW_RATE); if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7) msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT; else msp->tx_bw_control = TX_BW_CONTROL_ABSENT; } } #if defined(CONFIG_OF) static const struct of_device_id mv643xx_eth_shared_ids[] = { { .compatible = "marvell,orion-eth", }, { .compatible = "marvell,kirkwood-eth", }, { } }; MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids); #endif #ifdef CONFIG_OF_IRQ #define mv643xx_eth_property(_np, _name, _v) \ do { \ u32 tmp; \ if (!of_property_read_u32(_np, "marvell," _name, &tmp)) \ _v = tmp; \ } while (0) static struct platform_device *port_platdev[3]; static void mv643xx_eth_shared_of_remove(void) { int n; for (n = 0; n < 3; n++) { platform_device_del(port_platdev[n]); port_platdev[n] = NULL; } } static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, struct device_node *pnp) { struct platform_device *ppdev; struct mv643xx_eth_platform_data ppd; struct resource res; int ret; int dev_num = 0; memset(&ppd, 0, sizeof(ppd)); ppd.shared = pdev; memset(&res, 0, sizeof(res)); if (of_irq_to_resource(pnp, 0, &res) <= 0) { dev_err(&pdev->dev, "missing interrupt on %pOFn\n", pnp); return -EINVAL; } if (of_property_read_u32(pnp, "reg", &ppd.port_number)) { dev_err(&pdev->dev, "missing reg property on %pOFn\n", pnp); return -EINVAL; } if (ppd.port_number >= 3) { dev_err(&pdev->dev, "invalid reg property on %pOFn\n", pnp); return -EINVAL; } while (dev_num < 3 && port_platdev[dev_num]) dev_num++; if (dev_num == 3) { dev_err(&pdev->dev, "too many ports registered\n"); return -EINVAL; } ret = of_get_mac_address(pnp, ppd.mac_addr); if (ret == -EPROBE_DEFER) return ret; mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size); mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr); mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size); mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size); mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr); mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size); of_get_phy_mode(pnp, &ppd.interface); ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0); if (!ppd.phy_node) { ppd.phy_addr = MV643XX_ETH_PHY_NONE; of_property_read_u32(pnp, "speed", &ppd.speed); of_property_read_u32(pnp, "duplex", &ppd.duplex); } ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num); if (!ppdev) return -ENOMEM; ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); ppdev->dev.of_node = pnp; ret = platform_device_add_resources(ppdev, &res, 1); if (ret) goto port_err; ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd)); if (ret) goto port_err; ret = platform_device_add(ppdev); if (ret) goto port_err; port_platdev[dev_num] = ppdev; return 0; port_err: platform_device_put(ppdev); return ret; } static int mv643xx_eth_shared_of_probe(struct platform_device *pdev) { struct mv643xx_eth_shared_platform_data *pd; struct device_node *pnp, *np = pdev->dev.of_node; int ret; /* bail out if not registered from DT */ if (!np) return 0; pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL); if (!pd) return -ENOMEM; pdev->dev.platform_data = pd; mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit); for_each_available_child_of_node(np, pnp) { ret = mv643xx_eth_shared_of_add_port(pdev, pnp); if (ret) { of_node_put(pnp); mv643xx_eth_shared_of_remove(); return ret; } } return 0; } #else static inline int 
mv643xx_eth_shared_of_probe(struct platform_device *pdev) { return 0; } static inline void mv643xx_eth_shared_of_remove(void) { } #endif static int mv643xx_eth_shared_probe(struct platform_device *pdev) { static int mv643xx_eth_version_printed; struct mv643xx_eth_shared_platform_data *pd; struct mv643xx_eth_shared_private *msp; const struct mbus_dram_target_info *dram; struct resource *res; int ret; if (!mv643xx_eth_version_printed++) pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n", mv643xx_eth_driver_version); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) return -EINVAL; msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL); if (msp == NULL) return -ENOMEM; platform_set_drvdata(pdev, msp); msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (msp->base == NULL) return -ENOMEM; msp->clk = devm_clk_get(&pdev->dev, NULL); if (!IS_ERR(msp->clk)) clk_prepare_enable(msp->clk); /* * (Re-)program MBUS remapping windows if we are asked to. */ dram = mv_mbus_dram_info(); if (dram) mv643xx_eth_conf_mbus_windows(msp, dram); ret = mv643xx_eth_shared_of_probe(pdev); if (ret) goto err_put_clk; pd = dev_get_platdata(&pdev->dev); msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? pd->tx_csum_limit : 9 * 1024; infer_hw_params(msp); return 0; err_put_clk: if (!IS_ERR(msp->clk)) clk_disable_unprepare(msp->clk); return ret; } static int mv643xx_eth_shared_remove(struct platform_device *pdev) { struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); mv643xx_eth_shared_of_remove(); if (!IS_ERR(msp->clk)) clk_disable_unprepare(msp->clk); return 0; } static struct platform_driver mv643xx_eth_shared_driver = { .probe = mv643xx_eth_shared_probe, .remove = mv643xx_eth_shared_remove, .driver = { .name = MV643XX_ETH_SHARED_NAME, .of_match_table = of_match_ptr(mv643xx_eth_shared_ids), }, }; static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr) { int addr_shift = 5 * mp->port_num; u32 data; data = rdl(mp, PHY_ADDR); data &= ~(0x1f << addr_shift); data |= (phy_addr & 0x1f) << addr_shift; wrl(mp, PHY_ADDR, data); } static int phy_addr_get(struct mv643xx_eth_private *mp) { unsigned int data; data = rdl(mp, PHY_ADDR); return (data >> (5 * mp->port_num)) & 0x1f; } static void set_params(struct mv643xx_eth_private *mp, struct mv643xx_eth_platform_data *pd) { struct net_device *dev = mp->dev; unsigned int tx_ring_size; if (is_valid_ether_addr(pd->mac_addr)) { eth_hw_addr_set(dev, pd->mac_addr); } else { u8 addr[ETH_ALEN]; uc_addr_get(mp, addr); eth_hw_addr_set(dev, addr); } mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE; if (pd->rx_queue_size) mp->rx_ring_size = pd->rx_queue_size; mp->rx_desc_sram_addr = pd->rx_sram_addr; mp->rx_desc_sram_size = pd->rx_sram_size; mp->rxq_count = pd->rx_queue_count ? : 1; tx_ring_size = DEFAULT_TX_QUEUE_SIZE; if (pd->tx_queue_size) tx_ring_size = pd->tx_queue_size; mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size, MV643XX_MAX_SKB_DESCS * 2, 4096); if (mp->tx_ring_size != tx_ring_size) netdev_warn(dev, "TX queue size set to %u (requested %u)\n", mp->tx_ring_size, tx_ring_size); mp->tx_desc_sram_addr = pd->tx_sram_addr; mp->tx_desc_sram_size = pd->tx_sram_size; mp->txq_count = pd->tx_queue_count ? : 1; } static int get_phy_mode(struct mv643xx_eth_private *mp) { struct device *dev = mp->dev->dev.parent; phy_interface_t iface; int err; if (dev->of_node) err = of_get_phy_mode(dev->of_node, &iface); /* Historical default if unspecified. 
We could also read/write * the interface state in the PSC1 */ if (!dev->of_node || err) iface = PHY_INTERFACE_MODE_GMII; return iface; } static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, int phy_addr) { struct phy_device *phydev; int start; int num; int i; char phy_id[MII_BUS_ID_SIZE + 3]; if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) { start = phy_addr_get(mp) & 0x1f; num = 32; } else { start = phy_addr & 0x1f; num = 1; } /* Attempt to connect to the PHY using orion-mdio */ phydev = ERR_PTR(-ENODEV); for (i = 0; i < num; i++) { int addr = (start + i) & 0x1f; snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "orion-mdio-mii", addr); phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link, get_phy_mode(mp)); if (!IS_ERR(phydev)) { phy_addr_set(mp, addr); break; } } return phydev; } static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) { struct net_device *dev = mp->dev; struct phy_device *phy = dev->phydev; if (speed == 0) { phy->autoneg = AUTONEG_ENABLE; phy->speed = 0; phy->duplex = 0; linkmode_copy(phy->advertising, phy->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phy->advertising); } else { phy->autoneg = AUTONEG_DISABLE; linkmode_zero(phy->advertising); phy->speed = speed; phy->duplex = duplex; } phy_start_aneg(phy); } static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex) { struct net_device *dev = mp->dev; u32 pscr; pscr = rdlp(mp, PORT_SERIAL_CONTROL); if (pscr & SERIAL_PORT_ENABLE) { pscr &= ~SERIAL_PORT_ENABLE; wrlp(mp, PORT_SERIAL_CONTROL, pscr); } pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED; if (!dev->phydev) { pscr |= DISABLE_AUTO_NEG_SPEED_GMII; if (speed == SPEED_1000) pscr |= SET_GMII_SPEED_TO_1000; else if (speed == SPEED_100) pscr |= SET_MII_SPEED_TO_100; pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL; pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX; if (duplex == DUPLEX_FULL) pscr |= SET_FULL_DUPLEX_MODE; } wrlp(mp, PORT_SERIAL_CONTROL, pscr); } static const struct net_device_ops mv643xx_eth_netdev_ops = { .ndo_open = mv643xx_eth_open, .ndo_stop = mv643xx_eth_stop, .ndo_start_xmit = mv643xx_eth_xmit, .ndo_set_rx_mode = mv643xx_eth_set_rx_mode, .ndo_set_mac_address = mv643xx_eth_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_eth_ioctl = mv643xx_eth_ioctl, .ndo_change_mtu = mv643xx_eth_change_mtu, .ndo_set_features = mv643xx_eth_set_features, .ndo_tx_timeout = mv643xx_eth_tx_timeout, .ndo_get_stats = mv643xx_eth_get_stats, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mv643xx_eth_netpoll, #endif }; static int mv643xx_eth_probe(struct platform_device *pdev) { struct mv643xx_eth_platform_data *pd; struct mv643xx_eth_private *mp; struct net_device *dev; struct phy_device *phydev = NULL; u32 psc1r; int err, irq; pd = dev_get_platdata(&pdev->dev); if (pd == NULL) { dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n"); return -ENODEV; } if (pd->shared == NULL) { dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n"); return -ENODEV; } dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8); if (!dev) return -ENOMEM; SET_NETDEV_DEV(dev, &pdev->dev); mp = netdev_priv(dev); platform_set_drvdata(pdev, mp); mp->shared = platform_get_drvdata(pd->shared); mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10); mp->port_num = pd->port_number; mp->dev = dev; if (of_device_is_compatible(pdev->dev.of_node, "marvell,kirkwood-eth-port")) { psc1r = rdlp(mp, PORT_SERIAL_CONTROL1); /* Kirkwood resets some registers on gated clocks. 
Especially * CLK125_BYPASS_EN must be cleared but is not available on * all other SoCs/System Controllers using this driver. */ psc1r &= ~CLK125_BYPASS_EN; /* On Kirkwood with two Ethernet controllers, if both of them * have RGMII_EN disabled, the first controller will be in GMII * mode and the second one is effectively disabled, instead of * two MII interfaces. * * To enable GMII in the first controller, the second one must * also be configured (and may be enabled) with RGMII_EN * disabled too, even though it cannot be used at all. */ switch (pd->interface) { /* Use internal to denote second controller being disabled */ case PHY_INTERFACE_MODE_INTERNAL: case PHY_INTERFACE_MODE_MII: case PHY_INTERFACE_MODE_GMII: psc1r &= ~RGMII_EN; break; case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: psc1r |= RGMII_EN; break; default: /* Unknown; don't touch */ break; } wrlp(mp, PORT_SERIAL_CONTROL1, psc1r); } /* * Start with a default rate, and if there is a clock, allow * it to override the default. */ mp->t_clk = 133000000; mp->clk = devm_clk_get(&pdev->dev, NULL); if (!IS_ERR(mp->clk)) { clk_prepare_enable(mp->clk); mp->t_clk = clk_get_rate(mp->clk); } else if (!IS_ERR(mp->shared->clk)) { mp->t_clk = clk_get_rate(mp->shared->clk); } set_params(mp, pd); netif_set_real_num_tx_queues(dev, mp->txq_count); netif_set_real_num_rx_queues(dev, mp->rxq_count); err = 0; if (pd->phy_node) { phydev = of_phy_connect(mp->dev, pd->phy_node, mv643xx_eth_adjust_link, 0, get_phy_mode(mp)); if (!phydev) err = -ENODEV; else phy_addr_set(mp, phydev->mdio.addr); } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { phydev = phy_scan(mp, pd->phy_addr); if (IS_ERR(phydev)) err = PTR_ERR(phydev); else phy_init(mp, pd->speed, pd->duplex); } if (err == -ENODEV) { err = -EPROBE_DEFER; goto out; } if (err) goto out; dev->ethtool_ops = &mv643xx_eth_ethtool_ops; init_pscr(mp, pd->speed, pd->duplex); mib_counters_clear(mp); timer_setup(&mp->mib_counters_timer, mib_counters_timer_wrapper, 0); mp->mib_counters_timer.expires = jiffies + 30 * HZ; spin_lock_init(&mp->mib_counters_lock); INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); netif_napi_add(dev, &mp->napi, mv643xx_eth_poll); timer_setup(&mp->rx_oom, oom_timer_wrapper, 0); irq = platform_get_irq(pdev, 0); if (WARN_ON(irq < 0)) { err = irq; goto out; } dev->irq = irq; dev->netdev_ops = &mv643xx_eth_netdev_ops; dev->watchdog_timeo = 2 * HZ; dev->base_addr = 0; dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; dev->vlan_features = dev->features; dev->features |= NETIF_F_RXCSUM; dev->hw_features = dev->features; dev->priv_flags |= IFF_UNICAST_FLT; netif_set_tso_max_segs(dev, MV643XX_MAX_TSO_SEGS); /* MTU range: 64 - 9500 */ dev->min_mtu = 64; dev->max_mtu = 9500; if (mp->shared->win_protect) wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect); netif_carrier_off(dev); wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE); set_rx_coal(mp, 250); set_tx_coal(mp, 0); err = register_netdev(dev); if (err) goto out; netdev_notice(dev, "port %d with MAC address %pM\n", mp->port_num, dev->dev_addr); if (mp->tx_desc_sram_size > 0) netdev_notice(dev, "configured with sram\n"); return 0; out: if (!IS_ERR(mp->clk)) clk_disable_unprepare(mp->clk); free_netdev(dev); return err; } static int mv643xx_eth_remove(struct platform_device *pdev) { struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); struct net_device *dev = mp->dev; unregister_netdev(mp->dev); if (dev->phydev) 
phy_disconnect(dev->phydev); cancel_work_sync(&mp->tx_timeout_task); if (!IS_ERR(mp->clk)) clk_disable_unprepare(mp->clk); free_netdev(mp->dev); return 0; } static void mv643xx_eth_shutdown(struct platform_device *pdev) { struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); /* Mask all interrupts on ethernet port */ wrlp(mp, INT_MASK, 0); rdlp(mp, INT_MASK); if (netif_running(mp->dev)) port_reset(mp); } static struct platform_driver mv643xx_eth_driver = { .probe = mv643xx_eth_probe, .remove = mv643xx_eth_remove, .shutdown = mv643xx_eth_shutdown, .driver = { .name = MV643XX_ETH_NAME, }, }; static struct platform_driver * const drivers[] = { &mv643xx_eth_shared_driver, &mv643xx_eth_driver, }; static int __init mv643xx_eth_init_module(void) { return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); } module_init(mv643xx_eth_init_module); static void __exit mv643xx_eth_cleanup_module(void) { platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); } module_exit(mv643xx_eth_cleanup_module); MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, " "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek"); MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME); MODULE_ALIAS("platform:" MV643XX_ETH_NAME);
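/* Descriptive note on the registration above: platform_register_drivers()
 * registers the drivers[] array in order, so the shared controller driver is
 * registered before the per-port driver, whose probe looks up its shared
 * state via platform_get_drvdata(pd->shared). The two MODULE_ALIAS entries
 * let the module autoload for either platform device name.
 */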
linux-master
drivers/net/ethernet/marvell/mv643xx_eth.c
/* * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs. * * Copyright (C) 2012 Marvell * * Rami Rosen <[email protected]> * Thomas Petazzoni <[email protected]> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/clk.h> #include <linux/cpu.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> #include <linux/inetdevice.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/mbus.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/phy/phy.h> #include <linux/phy.h> #include <linux/phylink.h> #include <linux/platform_device.h> #include <linux/skbuff.h> #include <net/hwbm.h> #include "mvneta_bm.h" #include <net/ip.h> #include <net/ipv6.h> #include <net/tso.h> #include <net/page_pool/helpers.h> #include <net/pkt_sched.h> #include <linux/bpf_trace.h> /* Registers */ #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) #define MVNETA_RXQ_HW_BUF_ALLOC BIT(0) #define MVNETA_RXQ_SHORT_POOL_ID_SHIFT 4 #define MVNETA_RXQ_SHORT_POOL_ID_MASK 0x30 #define MVNETA_RXQ_LONG_POOL_ID_SHIFT 6 #define MVNETA_RXQ_LONG_POOL_ID_MASK 0xc0 #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8) #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8) #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2)) #define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16) #define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2)) #define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2)) #define MVNETA_RXQ_BUF_SIZE_SHIFT 19 #define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19) #define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2)) #define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2)) #define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16 #define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255 #define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool) (0x1700 + ((pool) << 2)) #define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT 3 #define MVNETA_PORT_POOL_BUFFER_SZ_MASK 0xfff8 #define MVNETA_PORT_RX_RESET 0x1cc0 #define MVNETA_PORT_RX_DMA_RESET BIT(0) #define MVNETA_PHY_ADDR 0x2000 #define MVNETA_PHY_ADDR_MASK 0x1f #define MVNETA_MBUS_RETRY 0x2010 #define MVNETA_UNIT_INTR_CAUSE 0x2080 #define MVNETA_UNIT_CONTROL 0x20B0 #define MVNETA_PHY_POLLING_ENABLE BIT(1) #define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3)) #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3)) #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2)) #define MVNETA_BASE_ADDR_ENABLE 0x2290 #define MVNETA_AC5_CNM_DDR_TARGET 0x2 #define MVNETA_AC5_CNM_DDR_ATTR 0xb #define MVNETA_ACCESS_PROTECT_ENABLE 0x2294 #define MVNETA_PORT_CONFIG 0x2400 #define MVNETA_UNI_PROMISC_MODE BIT(0) #define MVNETA_DEF_RXQ(q) ((q) << 1) #define MVNETA_DEF_RXQ_ARP(q) ((q) << 4) #define MVNETA_TX_UNSET_ERR_SUM BIT(12) #define MVNETA_DEF_RXQ_TCP(q) ((q) << 16) #define MVNETA_DEF_RXQ_UDP(q) ((q) << 19) #define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22) #define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25) #define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \ MVNETA_DEF_RXQ_ARP(q) | \ MVNETA_DEF_RXQ_TCP(q) | \ MVNETA_DEF_RXQ_UDP(q) | \ MVNETA_DEF_RXQ_BPDU(q) | \ MVNETA_TX_UNSET_ERR_SUM | \ MVNETA_RX_CSUM_WITH_PSEUDO_HDR) #define MVNETA_PORT_CONFIG_EXTEND 0x2404 #define MVNETA_MAC_ADDR_LOW 0x2414 #define MVNETA_MAC_ADDR_HIGH 0x2418 #define MVNETA_SDMA_CONFIG 0x241c 
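/* Illustrative expansion of MVNETA_PORT_CONFIG_DEFL_VALUE() above: with the
 * default RX queue q = 0 every per-queue field collapses to zero, so the
 * register value reduces to MVNETA_TX_UNSET_ERR_SUM |
 * MVNETA_RX_CSUM_WITH_PSEUDO_HDR, i.e. BIT(12) | BIT(25) = 0x02001000.
 */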
#define MVNETA_SDMA_BRST_SIZE_16 4 #define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1) #define MVNETA_RX_NO_DATA_SWAP BIT(4) #define MVNETA_TX_NO_DATA_SWAP BIT(5) #define MVNETA_DESC_SWAP BIT(6) #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22) #define MVNETA_VLAN_PRIO_TO_RXQ 0x2440 #define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3)) #define MVNETA_PORT_STATUS 0x2444 #define MVNETA_TX_IN_PRGRS BIT(0) #define MVNETA_TX_FIFO_EMPTY BIT(8) #define MVNETA_RX_MIN_FRAME_SIZE 0x247c /* Only exists on Armada XP and Armada 370 */ #define MVNETA_SERDES_CFG 0x24A0 #define MVNETA_SGMII_SERDES_PROTO 0x0cc7 #define MVNETA_QSGMII_SERDES_PROTO 0x0667 #define MVNETA_HSGMII_SERDES_PROTO 0x1107 #define MVNETA_TYPE_PRIO 0x24bc #define MVNETA_FORCE_UNI BIT(21) #define MVNETA_TXQ_CMD_1 0x24e4 #define MVNETA_TXQ_CMD 0x2448 #define MVNETA_TXQ_DISABLE_SHIFT 8 #define MVNETA_TXQ_ENABLE_MASK 0x000000ff #define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484 #define MVNETA_OVERRUN_FRAME_COUNT 0x2488 #define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4 #define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31) #define MVNETA_ACC_MODE 0x2500 #define MVNETA_BM_ADDRESS 0x2504 #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2)) #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00 #define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq) #define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8) #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2)) /* Exception Interrupt Port/Queue Cause register * * Their behavior depend of the mapping done using the PCPX2Q * registers. For a given CPU if the bit associated to a queue is not * set, then for the register a read from this CPU will always return * 0 and a write won't do anything */ #define MVNETA_INTR_NEW_CAUSE 0x25a0 #define MVNETA_INTR_NEW_MASK 0x25a4 /* bits 0..7 = TXQ SENT, one bit per queue. * bits 8..15 = RXQ OCCUP, one bit per queue. * bits 16..23 = RXQ FREE, one bit per queue. * bit 29 = OLD_REG_SUM, see old reg ? 
* bit 30 = TX_ERR_SUM, one bit for 4 ports * bit 31 = MISC_SUM, one bit for 4 ports */ #define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0) #define MVNETA_TX_INTR_MASK_ALL (0xff << 0) #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8) #define MVNETA_RX_INTR_MASK_ALL (0xff << 8) #define MVNETA_MISCINTR_INTR_MASK BIT(31) #define MVNETA_INTR_OLD_CAUSE 0x25a8 #define MVNETA_INTR_OLD_MASK 0x25ac /* Data Path Port/Queue Cause Register */ #define MVNETA_INTR_MISC_CAUSE 0x25b0 #define MVNETA_INTR_MISC_MASK 0x25b4 #define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0) #define MVNETA_CAUSE_LINK_CHANGE BIT(1) #define MVNETA_CAUSE_PTP BIT(4) #define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7) #define MVNETA_CAUSE_RX_OVERRUN BIT(8) #define MVNETA_CAUSE_RX_CRC_ERROR BIT(9) #define MVNETA_CAUSE_RX_LARGE_PKT BIT(10) #define MVNETA_CAUSE_TX_UNDERUN BIT(11) #define MVNETA_CAUSE_PRBS_ERR BIT(12) #define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13) #define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14) #define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16 #define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT) #define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool))) #define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24 #define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT) #define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q))) #define MVNETA_INTR_ENABLE 0x25b8 #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00 #define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff #define MVNETA_RXQ_CMD 0x2680 #define MVNETA_RXQ_DISABLE_SHIFT 8 #define MVNETA_RXQ_ENABLE_MASK 0x000000ff #define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4)) #define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4)) #define MVNETA_GMAC_CTRL_0 0x2c00 #define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2 #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc #define MVNETA_GMAC0_PORT_1000BASE_X BIT(1) #define MVNETA_GMAC0_PORT_ENABLE BIT(0) #define MVNETA_GMAC_CTRL_2 0x2c08 #define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0) #define MVNETA_GMAC2_PCS_ENABLE BIT(3) #define MVNETA_GMAC2_PORT_RGMII BIT(4) #define MVNETA_GMAC2_PORT_RESET BIT(6) #define MVNETA_GMAC_STATUS 0x2c10 #define MVNETA_GMAC_LINK_UP BIT(0) #define MVNETA_GMAC_SPEED_1000 BIT(1) #define MVNETA_GMAC_SPEED_100 BIT(2) #define MVNETA_GMAC_FULL_DUPLEX BIT(3) #define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4) #define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5) #define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6) #define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7) #define MVNETA_GMAC_AN_COMPLETE BIT(11) #define MVNETA_GMAC_SYNC_OK BIT(14) #define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c #define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0) #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1) #define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2) #define MVNETA_GMAC_AN_BYPASS_ENABLE BIT(3) #define MVNETA_GMAC_INBAND_RESTART_AN BIT(4) #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5) #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6) #define MVNETA_GMAC_AN_SPEED_EN BIT(7) #define MVNETA_GMAC_CONFIG_FLOW_CTRL BIT(8) #define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL BIT(9) #define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11) #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) #define MVNETA_GMAC_AN_DUPLEX_EN BIT(13) #define MVNETA_GMAC_CTRL_4 0x2c90 #define MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE BIT(1) #define MVNETA_MIB_COUNTERS_BASE 0x3000 #define MVNETA_MIB_LATE_COLLISION 0x7c #define MVNETA_DA_FILT_SPEC_MCAST 0x3400 #define MVNETA_DA_FILT_OTH_MCAST 0x3500 #define MVNETA_DA_FILT_UCAST_BASE 0x3600 #define 
MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2)) #define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2)) #define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000 #define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16) #define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2)) #define MVNETA_TXQ_DEC_SENT_SHIFT 16 #define MVNETA_TXQ_DEC_SENT_MASK 0xff #define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2)) #define MVNETA_TXQ_SENT_DESC_SHIFT 16 #define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000 #define MVNETA_PORT_TX_RESET 0x3cf0 #define MVNETA_PORT_TX_DMA_RESET BIT(0) #define MVNETA_TXQ_CMD1_REG 0x3e00 #define MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 BIT(3) #define MVNETA_TXQ_CMD1_BW_LIM_EN BIT(0) #define MVNETA_REFILL_NUM_CLK_REG 0x3e08 #define MVNETA_REFILL_MAX_NUM_CLK 0x0000ffff #define MVNETA_TX_MTU 0x3e0c #define MVNETA_TX_TOKEN_SIZE 0x3e14 #define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff #define MVNETA_TXQ_BUCKET_REFILL_REG(q) (0x3e20 + ((q) << 2)) #define MVNETA_TXQ_BUCKET_REFILL_PERIOD_MASK 0x3ff00000 #define MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT 20 #define MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX 0x0007ffff #define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2)) #define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff /* The values of the bucket refill base period and refill period are taken from * the reference manual, and adds up to a base resolution of 10Kbps. This allows * to cover all rate-limit values from 10Kbps up to 5Gbps */ /* Base period for the rate limit algorithm */ #define MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS 100 /* Number of Base Period to wait between each bucket refill */ #define MVNETA_TXQ_BUCKET_REFILL_PERIOD 1000 /* The base resolution for rate limiting, in bps. Any max_rate value should be * a multiple of that value. */ #define MVNETA_TXQ_RATE_LIMIT_RESOLUTION (NSEC_PER_SEC / \ (MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS * \ MVNETA_TXQ_BUCKET_REFILL_PERIOD)) #define MVNETA_LPI_CTRL_0 0x2cc0 #define MVNETA_LPI_CTRL_1 0x2cc4 #define MVNETA_LPI_REQUEST_ENABLE BIT(0) #define MVNETA_LPI_CTRL_2 0x2cc8 #define MVNETA_LPI_STATUS 0x2ccc #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff /* Descriptor ring Macros */ #define MVNETA_QUEUE_NEXT_DESC(q, index) \ (((index) < (q)->last_desc) ? ((index) + 1) : 0) /* Various constants */ /* Coalescing */ #define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */ #define MVNETA_RX_COAL_PKTS 32 #define MVNETA_RX_COAL_USEC 100 /* The two bytes Marvell header. Either contains a special value used * by Marvell switches when a specific hardware mode is enabled (not * supported by this driver) or is filled automatically by zeroes on * the RX side. Those two bytes being at the front of the Ethernet * header, they allow to have the IP header aligned on a 4 bytes * boundary automatically: the hardware skips those two bytes on its * own. 
*/ #define MVNETA_MH_SIZE 2 #define MVNETA_VLAN_TAG_LEN 4 #define MVNETA_TX_CSUM_DEF_SIZE 1600 #define MVNETA_TX_CSUM_MAX_SIZE 9800 #define MVNETA_ACC_MODE_EXT1 1 #define MVNETA_ACC_MODE_EXT2 2 #define MVNETA_MAX_DECODE_WIN 6 /* Timeout constants */ #define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000 #define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000 #define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000 #define MVNETA_TX_MTU_MAX 0x3ffff /* The RSS lookup table actually has 256 entries but we do not use * them yet */ #define MVNETA_RSS_LU_TABLE_SIZE 1 /* Max number of Rx descriptors */ #define MVNETA_MAX_RXD 512 /* Max number of Tx descriptors */ #define MVNETA_MAX_TXD 1024 /* Max number of allowed TCP segments for software TSO */ #define MVNETA_MAX_TSO_SEGS 100 #define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) /* The size of a TSO header page */ #define MVNETA_TSO_PAGE_SIZE (2 * PAGE_SIZE) /* Number of TSO headers per page. This should be a power of 2 */ #define MVNETA_TSO_PER_PAGE (MVNETA_TSO_PAGE_SIZE / TSO_HEADER_SIZE) /* Maximum number of TSO header pages */ #define MVNETA_MAX_TSO_PAGES (MVNETA_MAX_TXD / MVNETA_TSO_PER_PAGE) /* descriptor aligned size */ #define MVNETA_DESC_ALIGNED_SIZE 32 /* Number of bytes to be taken into account by HW when putting incoming data * to the buffers. It is needed in case NET_SKB_PAD exceeds maximum packet * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers. */ #define MVNETA_RX_PKT_OFFSET_CORRECTION 64 #define MVNETA_RX_PKT_SIZE(mtu) \ ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \ ETH_HLEN + ETH_FCS_LEN, \ cache_line_size()) /* Driver assumes that the last 3 bits are 0 */ #define MVNETA_SKB_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) #define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \ MVNETA_SKB_HEADROOM)) #define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD) #define MVNETA_RX_GET_BM_POOL_ID(rxd) \ (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT) enum { ETHTOOL_STAT_EEE_WAKEUP, ETHTOOL_STAT_SKB_ALLOC_ERR, ETHTOOL_STAT_REFILL_ERR, ETHTOOL_XDP_REDIRECT, ETHTOOL_XDP_PASS, ETHTOOL_XDP_DROP, ETHTOOL_XDP_TX, ETHTOOL_XDP_TX_ERR, ETHTOOL_XDP_XMIT, ETHTOOL_XDP_XMIT_ERR, ETHTOOL_MAX_STATS, }; struct mvneta_statistic { unsigned short offset; unsigned short type; const char name[ETH_GSTRING_LEN]; }; #define T_REG_32 32 #define T_REG_64 64 #define T_SW 1 #define MVNETA_XDP_PASS 0 #define MVNETA_XDP_DROPPED BIT(0) #define MVNETA_XDP_TX BIT(1) #define MVNETA_XDP_REDIR BIT(2) static const struct mvneta_statistic mvneta_statistics[] = { { 0x3000, T_REG_64, "good_octets_received", }, { 0x3010, T_REG_32, "good_frames_received", }, { 0x3008, T_REG_32, "bad_octets_received", }, { 0x3014, T_REG_32, "bad_frames_received", }, { 0x3018, T_REG_32, "broadcast_frames_received", }, { 0x301c, T_REG_32, "multicast_frames_received", }, { 0x3050, T_REG_32, "unrec_mac_control_received", }, { 0x3058, T_REG_32, "good_fc_received", }, { 0x305c, T_REG_32, "bad_fc_received", }, { 0x3060, T_REG_32, "undersize_received", }, { 0x3064, T_REG_32, "fragments_received", }, { 0x3068, T_REG_32, "oversize_received", }, { 0x306c, T_REG_32, "jabber_received", }, { 0x3070, T_REG_32, "mac_receive_error", }, { 0x3074, T_REG_32, "bad_crc_event", }, { 0x3078, T_REG_32, "collision", }, { 0x307c, T_REG_32, "late_collision", }, { 0x2484, T_REG_32, "rx_discard", }, { 0x2488, T_REG_32, "rx_overrun", }, { 0x3020, T_REG_32, "frames_64_octets", }, { 0x3024, T_REG_32, "frames_65_to_127_octets", }, { 0x3028, T_REG_32, "frames_128_to_255_octets", 
}, { 0x302c, T_REG_32, "frames_256_to_511_octets", }, { 0x3030, T_REG_32, "frames_512_to_1023_octets", }, { 0x3034, T_REG_32, "frames_1024_to_max_octets", }, { 0x3038, T_REG_64, "good_octets_sent", }, { 0x3040, T_REG_32, "good_frames_sent", }, { 0x3044, T_REG_32, "excessive_collision", }, { 0x3048, T_REG_32, "multicast_frames_sent", }, { 0x304c, T_REG_32, "broadcast_frames_sent", }, { 0x3054, T_REG_32, "fc_sent", }, { 0x300c, T_REG_32, "internal_mac_transmit_err", }, { ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", }, { ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", }, { ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", }, { ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", }, { ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", }, { ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", }, { ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", }, { ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", }, { ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", }, { ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", }, }; struct mvneta_stats { u64 rx_packets; u64 rx_bytes; u64 tx_packets; u64 tx_bytes; /* xdp */ u64 xdp_redirect; u64 xdp_pass; u64 xdp_drop; u64 xdp_xmit; u64 xdp_xmit_err; u64 xdp_tx; u64 xdp_tx_err; }; struct mvneta_ethtool_stats { struct mvneta_stats ps; u64 skb_alloc_error; u64 refill_error; }; struct mvneta_pcpu_stats { struct u64_stats_sync syncp; struct mvneta_ethtool_stats es; u64 rx_dropped; u64 rx_errors; }; struct mvneta_pcpu_port { /* Pointer to the shared port */ struct mvneta_port *pp; /* Pointer to the CPU-local NAPI struct */ struct napi_struct napi; /* Cause of the previous interrupt */ u32 cause_rx_tx; }; enum { __MVNETA_DOWN, }; struct mvneta_port { u8 id; struct mvneta_pcpu_port __percpu *ports; struct mvneta_pcpu_stats __percpu *stats; unsigned long state; int pkt_size; void __iomem *base; struct mvneta_rx_queue *rxqs; struct mvneta_tx_queue *txqs; struct net_device *dev; struct hlist_node node_online; struct hlist_node node_dead; int rxq_def; /* Protect the access to the percpu interrupt registers, * ensuring that the configuration remains coherent. 
*/ spinlock_t lock; bool is_stopped; u32 cause_rx_tx; struct napi_struct napi; struct bpf_prog *xdp_prog; /* Core clock */ struct clk *clk; /* AXI clock */ struct clk *clk_bus; u8 mcast_count[256]; u16 tx_ring_size; u16 rx_ring_size; phy_interface_t phy_interface; struct device_node *dn; unsigned int tx_csum_limit; struct phylink *phylink; struct phylink_config phylink_config; struct phylink_pcs phylink_pcs; struct phy *comphy; struct mvneta_bm *bm_priv; struct mvneta_bm_pool *pool_long; struct mvneta_bm_pool *pool_short; int bm_win_id; bool eee_enabled; bool eee_active; bool tx_lpi_enabled; u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)]; u32 indir[MVNETA_RSS_LU_TABLE_SIZE]; /* Flags for special SoC configurations */ bool neta_armada3700; bool neta_ac5; u16 rx_offset_correction; const struct mbus_dram_target_info *dram_target_info; }; /* The mvneta_tx_desc and mvneta_rx_desc structures describe the * layout of the transmit and reception DMA descriptors, and their * layout is therefore defined by the hardware design */ #define MVNETA_TX_L3_OFF_SHIFT 0 #define MVNETA_TX_IP_HLEN_SHIFT 8 #define MVNETA_TX_L4_UDP BIT(16) #define MVNETA_TX_L3_IP6 BIT(17) #define MVNETA_TXD_IP_CSUM BIT(18) #define MVNETA_TXD_Z_PAD BIT(19) #define MVNETA_TXD_L_DESC BIT(20) #define MVNETA_TXD_F_DESC BIT(21) #define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \ MVNETA_TXD_L_DESC | \ MVNETA_TXD_F_DESC) #define MVNETA_TX_L4_CSUM_FULL BIT(30) #define MVNETA_TX_L4_CSUM_NOT BIT(31) #define MVNETA_RXD_ERR_CRC 0x0 #define MVNETA_RXD_BM_POOL_SHIFT 13 #define MVNETA_RXD_BM_POOL_MASK (BIT(13) | BIT(14)) #define MVNETA_RXD_ERR_SUMMARY BIT(16) #define MVNETA_RXD_ERR_OVERRUN BIT(17) #define MVNETA_RXD_ERR_LEN BIT(18) #define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18)) #define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18)) #define MVNETA_RXD_L3_IP4 BIT(25) #define MVNETA_RXD_LAST_DESC BIT(26) #define MVNETA_RXD_FIRST_DESC BIT(27) #define MVNETA_RXD_FIRST_LAST_DESC (MVNETA_RXD_FIRST_DESC | \ MVNETA_RXD_LAST_DESC) #define MVNETA_RXD_L4_CSUM_OK BIT(30) #if defined(__LITTLE_ENDIAN) struct mvneta_tx_desc { u32 command; /* Options used by HW for packet transmitting.*/ u16 reserved1; /* csum_l4 (for future use) */ u16 data_size; /* Data size of transmitted packet in bytes */ u32 buf_phys_addr; /* Physical addr of transmitted buffer */ u32 reserved2; /* hw_cmd - (for future use, PMT) */ u32 reserved3[4]; /* Reserved - (for future use) */ }; struct mvneta_rx_desc { u32 status; /* Info about received packet */ u16 reserved1; /* pnc_info - (for future use, PnC) */ u16 data_size; /* Size of received packet in bytes */ u32 buf_phys_addr; /* Physical address of the buffer */ u32 reserved2; /* pnc_flow_id (for future use, PnC) */ u32 buf_cookie; /* cookie for access to RX buffer in rx path */ u16 reserved3; /* prefetch_cmd, for future use */ u16 reserved4; /* csum_l4 - (for future use, PnC) */ u32 reserved5; /* pnc_extra PnC (for future use, PnC) */ u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */ }; #else struct mvneta_tx_desc { u16 data_size; /* Data size of transmitted packet in bytes */ u16 reserved1; /* csum_l4 (for future use) */ u32 command; /* Options used by HW for packet transmitting.*/ u32 reserved2; /* hw_cmd - (for future use, PMT) */ u32 buf_phys_addr; /* Physical addr of transmitted buffer */ u32 reserved3[4]; /* Reserved - (for future use) */ }; struct mvneta_rx_desc { u16 data_size; /* Size of received packet in bytes */ u16 reserved1; /* pnc_info - (for future use, PnC) */ u32 status; /* Info about received packet */ u32 
reserved2; /* pnc_flow_id (for future use, PnC) */ u32 buf_phys_addr; /* Physical address of the buffer */ u16 reserved4; /* csum_l4 - (for future use, PnC) */ u16 reserved3; /* prefetch_cmd, for future use */ u32 buf_cookie; /* cookie for access to RX buffer in rx path */ u32 reserved5; /* pnc_extra PnC (for future use, PnC) */ u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */ }; #endif enum mvneta_tx_buf_type { MVNETA_TYPE_TSO, MVNETA_TYPE_SKB, MVNETA_TYPE_XDP_TX, MVNETA_TYPE_XDP_NDO, }; struct mvneta_tx_buf { enum mvneta_tx_buf_type type; union { struct xdp_frame *xdpf; struct sk_buff *skb; }; }; struct mvneta_tx_queue { /* Number of this TX queue, in the range 0-7 */ u8 id; /* Number of TX DMA descriptors in the descriptor ring */ int size; /* Number of currently used TX DMA descriptor in the * descriptor ring */ int count; int pending; int tx_stop_threshold; int tx_wake_threshold; /* Array of transmitted buffers */ struct mvneta_tx_buf *buf; /* Index of last TX DMA descriptor that was inserted */ int txq_put_index; /* Index of the TX DMA descriptor to be cleaned up */ int txq_get_index; u32 done_pkts_coal; /* Virtual address of the TX DMA descriptors array */ struct mvneta_tx_desc *descs; /* DMA address of the TX DMA descriptors array */ dma_addr_t descs_phys; /* Index of the last TX DMA descriptor */ int last_desc; /* Index of the next TX DMA descriptor to process */ int next_desc_to_proc; /* DMA buffers for TSO headers */ char *tso_hdrs[MVNETA_MAX_TSO_PAGES]; /* DMA address of TSO headers */ dma_addr_t tso_hdrs_phys[MVNETA_MAX_TSO_PAGES]; /* Affinity mask for CPUs*/ cpumask_t affinity_mask; }; struct mvneta_rx_queue { /* rx queue number, in the range 0-7 */ u8 id; /* num of rx descriptors in the rx descriptor ring */ int size; u32 pkts_coal; u32 time_coal; /* page_pool */ struct page_pool *page_pool; struct xdp_rxq_info xdp_rxq; /* Virtual address of the RX buffer */ void **buf_virt_addr; /* Virtual address of the RX DMA descriptors array */ struct mvneta_rx_desc *descs; /* DMA address of the RX DMA descriptors array */ dma_addr_t descs_phys; /* Index of the last RX DMA descriptor */ int last_desc; /* Index of the next RX DMA descriptor to process */ int next_desc_to_proc; /* Index of first RX DMA descriptor to refill */ int first_to_refill; u32 refill_num; }; static enum cpuhp_state online_hpstate; /* The hardware supports eight (8) rx queues, but we are only allowing * the first one to be used. Therefore, let's just allocate one queue. 
 */
static int rxq_number = 8;
static int txq_number = 8;
static int rxq_def;
static int rx_copybreak __read_mostly = 256;

/* HW BM needs each port to be identified by a unique ID */
static int global_port_id;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}

/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
	mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
	mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}

/* Get System Network Statistics */
static void
mvneta_get_stats64(struct net_device *dev,
		   struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 rx_dropped;
		u64 rx_errors;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			rx_packets = cpu_stats->es.ps.rx_packets;
			rx_bytes = cpu_stats->es.ps.rx_bytes;
			rx_dropped = cpu_stats->rx_dropped;
			rx_errors = cpu_stats->rx_errors;
			tx_packets = cpu_stats->es.ps.tx_packets;
			tx_bytes = cpu_stats->es.ps.tx_bytes;
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->rx_dropped += rx_dropped;
		stats->rx_errors += rx_errors;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->tx_dropped = dev->stats.tx_dropped;
}

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first * and the last descriptor for the RX packet.
Each RX packet is currently * received through a single RX descriptor, so not having each RX * descriptor with its first and last bits set is an error */ static int mvneta_rxq_desc_is_first_last(u32 status) { return (status & MVNETA_RXD_FIRST_LAST_DESC) == MVNETA_RXD_FIRST_LAST_DESC; } /* Add number of descriptors ready to receive new packets */ static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, int ndescs) { /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can * be added at once */ while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) { mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT)); ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX; } mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT)); } /* Get number of RX descriptors occupied by received packets */ static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) { u32 val; val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id)); return val & MVNETA_RXQ_OCCUPIED_ALL_MASK; } /* Update num of rx desc called upon return from rx path or * from mvneta_rxq_drop_pkts(). */ static void mvneta_rxq_desc_num_update(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, int rx_done, int rx_filled) { u32 val; if ((rx_done <= 0xff) && (rx_filled <= 0xff)) { val = rx_done | (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT); mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); return; } /* Only 255 descriptors can be added at once */ while ((rx_done > 0) || (rx_filled > 0)) { if (rx_done <= 0xff) { val = rx_done; rx_done = 0; } else { val = 0xff; rx_done -= 0xff; } if (rx_filled <= 0xff) { val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT; rx_filled = 0; } else { val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT; rx_filled -= 0xff; } mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); } } /* Get pointer to next RX descriptor to be processed by SW */ static struct mvneta_rx_desc * mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq) { int rx_desc = rxq->next_desc_to_proc; rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc); prefetch(rxq->descs + rxq->next_desc_to_proc); return rxq->descs + rx_desc; } /* Change maximum receive size of the port. 
*/ static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size) { u32 val; val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK; val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) << MVNETA_GMAC_MAX_RX_SIZE_SHIFT; mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); } /* Set rx queue offset */ static void mvneta_rxq_offset_set(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, int offset) { u32 val; val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK; /* Offset is in */ val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3); mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); } /* Tx descriptors helper methods */ /* Update HW with number of TX descriptors to be sent */ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp, struct mvneta_tx_queue *txq, int pend_desc) { u32 val; pend_desc += txq->pending; /* Only 255 Tx descriptors can be added at once */ do { val = min(pend_desc, 255); mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); pend_desc -= val; } while (pend_desc > 0); txq->pending = 0; } /* Get pointer to next TX descriptor to be processed (send) by HW */ static struct mvneta_tx_desc * mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq) { int tx_desc = txq->next_desc_to_proc; txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc); return txq->descs + tx_desc; } /* Release the last allocated TX descriptor. Useful to handle DMA * mapping failures in the TX path. */ static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq) { if (txq->next_desc_to_proc == 0) txq->next_desc_to_proc = txq->last_desc - 1; else txq->next_desc_to_proc--; } /* Set rxq buf size */ static void mvneta_rxq_buf_size_set(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, int buf_size) { u32 val; val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id)); val &= ~MVNETA_RXQ_BUF_SIZE_MASK; val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT); mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val); } /* Disable buffer management (BM) */ static void mvneta_rxq_bm_disable(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) { u32 val; val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); val &= ~MVNETA_RXQ_HW_BUF_ALLOC; mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); } /* Enable buffer management (BM) */ static void mvneta_rxq_bm_enable(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) { u32 val; val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); val |= MVNETA_RXQ_HW_BUF_ALLOC; mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); } /* Notify HW about port's assignment of pool for bigger packets */ static void mvneta_rxq_long_pool_set(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) { u32 val; val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK; val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT); mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); } /* Notify HW about port's assignment of pool for smaller packets */ static void mvneta_rxq_short_pool_set(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) { u32 val; val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK; val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT); mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); } /* Set port's receive buffer size for assigned BM pool */ static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp, int buf_size, u8 pool_id) { u32 val; if (!IS_ALIGNED(buf_size, 8)) { dev_warn(pp->dev->dev.parent, "illegal buf_size value %d, round to 
%d\n", buf_size, ALIGN(buf_size, 8)); buf_size = ALIGN(buf_size, 8); } val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id)); val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK; mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val); } /* Configure MBUS window in order to enable access BM internal SRAM */ static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize, u8 target, u8 attr) { u32 win_enable, win_protect; int i; win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE); if (pp->bm_win_id < 0) { /* Find first not occupied window */ for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) { if (win_enable & (1 << i)) { pp->bm_win_id = i; break; } } if (i == MVNETA_MAX_DECODE_WIN) return -ENOMEM; } else { i = pp->bm_win_id; } mvreg_write(pp, MVNETA_WIN_BASE(i), 0); mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); if (i < 4) mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) | (attr << 8) | target); mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000); win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE); win_protect |= 3 << (2 * i); mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); win_enable &= ~(1 << i); mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); return 0; } static int mvneta_bm_port_mbus_init(struct mvneta_port *pp) { u32 wsize; u8 target, attr; int err; /* Get BM window information */ err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize, &target, &attr); if (err < 0) return err; pp->bm_win_id = -1; /* Open NETA -> BM window */ err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize, target, attr); if (err < 0) { netdev_info(pp->dev, "fail to configure mbus window to BM\n"); return err; } return 0; } /* Assign and initialize pools for port. In case of fail * buffer manager will remain disabled for current port. 
*/ static int mvneta_bm_port_init(struct platform_device *pdev, struct mvneta_port *pp) { struct device_node *dn = pdev->dev.of_node; u32 long_pool_id, short_pool_id; if (!pp->neta_armada3700) { int ret; ret = mvneta_bm_port_mbus_init(pp); if (ret) return ret; } if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) { netdev_info(pp->dev, "missing long pool id\n"); return -EINVAL; } /* Create port's long pool depending on mtu */ pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id, MVNETA_BM_LONG, pp->id, MVNETA_RX_PKT_SIZE(pp->dev->mtu)); if (!pp->pool_long) { netdev_info(pp->dev, "fail to obtain long pool for port\n"); return -ENOMEM; } pp->pool_long->port_map |= 1 << pp->id; mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size, pp->pool_long->id); /* If short pool id is not defined, assume using single pool */ if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id)) short_pool_id = long_pool_id; /* Create port's short pool */ pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id, MVNETA_BM_SHORT, pp->id, MVNETA_BM_SHORT_PKT_SIZE); if (!pp->pool_short) { netdev_info(pp->dev, "fail to obtain short pool for port\n"); mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); return -ENOMEM; } if (short_pool_id != long_pool_id) { pp->pool_short->port_map |= 1 << pp->id; mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size, pp->pool_short->id); } return 0; } /* Update settings of a pool for bigger packets */ static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu) { struct mvneta_bm_pool *bm_pool = pp->pool_long; struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool; int num; /* Release all buffers from long pool */ mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id); if (hwbm_pool->buf_num) { WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); goto bm_mtu_err; } bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu); bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size); hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size)); /* Fill entire long pool */ num = hwbm_pool_add(hwbm_pool, hwbm_pool->size); if (num != hwbm_pool->size) { WARN(1, "pool %d: %d of %d allocated\n", bm_pool->id, num, hwbm_pool->size); goto bm_mtu_err; } mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id); return; bm_mtu_err: mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); pp->bm_priv = NULL; pp->rx_offset_correction = MVNETA_SKB_HEADROOM; mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1); netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n"); } /* Start the Ethernet port RX and TX activity */ static void mvneta_port_up(struct mvneta_port *pp) { int queue; u32 q_map; /* Enable all initialized TXs. */ q_map = 0; for (queue = 0; queue < txq_number; queue++) { struct mvneta_tx_queue *txq = &pp->txqs[queue]; if (txq->descs) q_map |= (1 << queue); } mvreg_write(pp, MVNETA_TXQ_CMD, q_map); q_map = 0; /* Enable all initialized RXQs. */ for (queue = 0; queue < rxq_number; queue++) { struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; if (rxq->descs) q_map |= (1 << queue); } mvreg_write(pp, MVNETA_RXQ_CMD, q_map); } /* Stop the Ethernet port activity */ static void mvneta_port_down(struct mvneta_port *pp) { u32 val; int count; /* Stop Rx port activity. Check port Rx activity. 
*/ val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK; /* Issue stop command for active channels only */ if (val != 0) mvreg_write(pp, MVNETA_RXQ_CMD, val << MVNETA_RXQ_DISABLE_SHIFT); /* Wait for all Rx activity to terminate. */ count = 0; do { if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) { netdev_warn(pp->dev, "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n", val); break; } mdelay(1); val = mvreg_read(pp, MVNETA_RXQ_CMD); } while (val & MVNETA_RXQ_ENABLE_MASK); /* Stop Tx port activity. Check port Tx activity. Issue stop * command for active channels only */ val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK; if (val != 0) mvreg_write(pp, MVNETA_TXQ_CMD, (val << MVNETA_TXQ_DISABLE_SHIFT)); /* Wait for all Tx activity to terminate. */ count = 0; do { if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) { netdev_warn(pp->dev, "TIMEOUT for TX stopped status=0x%08x\n", val); break; } mdelay(1); /* Check TX Command reg that all Txqs are stopped */ val = mvreg_read(pp, MVNETA_TXQ_CMD); } while (val & MVNETA_TXQ_ENABLE_MASK); /* Double check to verify that TX FIFO is empty */ count = 0; do { if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) { netdev_warn(pp->dev, "TX FIFO empty timeout status=0x%08x\n", val); break; } mdelay(1); val = mvreg_read(pp, MVNETA_PORT_STATUS); } while (!(val & MVNETA_TX_FIFO_EMPTY) && (val & MVNETA_TX_IN_PRGRS)); udelay(200); } /* Enable the port by setting the port enable bit of the MAC control register */ static void mvneta_port_enable(struct mvneta_port *pp) { u32 val; /* Enable port */ val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); val |= MVNETA_GMAC0_PORT_ENABLE; mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); } /* Disable the port and wait for about 200 usec before retuning */ static void mvneta_port_disable(struct mvneta_port *pp) { u32 val; /* Reset the Enable bit in the Serial Control Register */ val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); val &= ~MVNETA_GMAC0_PORT_ENABLE; mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); udelay(200); } /* Multicast tables methods */ /* Set all entries in Unicast MAC Table; queue==-1 means reject all */ static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue) { int offset; u32 val; if (queue == -1) { val = 0; } else { val = 0x1 | (queue << 1); val |= (val << 24) | (val << 16) | (val << 8); } for (offset = 0; offset <= 0xc; offset += 4) mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val); } /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */ static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue) { int offset; u32 val; if (queue == -1) { val = 0; } else { val = 0x1 | (queue << 1); val |= (val << 24) | (val << 16) | (val << 8); } for (offset = 0; offset <= 0xfc; offset += 4) mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val); } /* Set all entries in Other Multicast MAC Table. 
queue==-1 means reject all */ static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue) { int offset; u32 val; if (queue == -1) { memset(pp->mcast_count, 0, sizeof(pp->mcast_count)); val = 0; } else { memset(pp->mcast_count, 1, sizeof(pp->mcast_count)); val = 0x1 | (queue << 1); val |= (val << 24) | (val << 16) | (val << 8); } for (offset = 0; offset <= 0xfc; offset += 4) mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val); } static void mvneta_percpu_unmask_interrupt(void *arg) { struct mvneta_port *pp = arg; /* All the queue are unmasked, but actually only the ones * mapped to this CPU will be unmasked */ mvreg_write(pp, MVNETA_INTR_NEW_MASK, MVNETA_RX_INTR_MASK_ALL | MVNETA_TX_INTR_MASK_ALL | MVNETA_MISCINTR_INTR_MASK); } static void mvneta_percpu_mask_interrupt(void *arg) { struct mvneta_port *pp = arg; /* All the queue are masked, but actually only the ones * mapped to this CPU will be masked */ mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); } static void mvneta_percpu_clear_intr_cause(void *arg) { struct mvneta_port *pp = arg; /* All the queue are cleared, but actually only the ones * mapped to this CPU will be cleared */ mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); } /* This method sets defaults to the NETA port: * Clears interrupt Cause and Mask registers. * Clears all MAC tables. * Sets defaults to all registers. * Resets RX and TX descriptor rings. * Resets PHY. * This method can be called after mvneta_port_down() to return the port * settings to defaults. */ static void mvneta_defaults_set(struct mvneta_port *pp) { int cpu; int queue; u32 val; int max_cpu = num_present_cpus(); /* Clear all Cause registers */ on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); /* Mask all interrupts */ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); mvreg_write(pp, MVNETA_INTR_ENABLE, 0); /* Enable MBUS Retry bit16 */ mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20); /* Set CPU queue access map. CPUs are assigned to the RX and * TX queues modulo their number. If there is only one TX * queue then it is assigned to the CPU associated to the * default RX queue. */ for_each_present_cpu(cpu) { int rxq_map = 0, txq_map = 0; int rxq, txq; if (!pp->neta_armada3700) { for (rxq = 0; rxq < rxq_number; rxq++) if ((rxq % max_cpu) == cpu) rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); for (txq = 0; txq < txq_number; txq++) if ((txq % max_cpu) == cpu) txq_map |= MVNETA_CPU_TXQ_ACCESS(txq); /* With only one TX queue we configure a special case * which will allow to get all the irq on a single * CPU */ if (txq_number == 1) txq_map = (cpu == pp->rxq_def) ? 
MVNETA_CPU_TXQ_ACCESS(0) : 0; } else { txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK; rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK; } mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); } /* Reset RX and TX DMAs */ mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); /* Disable Legacy WRR, Disable EJP, Release from reset */ mvreg_write(pp, MVNETA_TXQ_CMD_1, 0); for (queue = 0; queue < txq_number; queue++) { mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0); mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0); } mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); /* Set Port Acceleration Mode */ if (pp->bm_priv) /* HW buffer management + legacy parser */ val = MVNETA_ACC_MODE_EXT2; else /* SW buffer management + legacy parser */ val = MVNETA_ACC_MODE_EXT1; mvreg_write(pp, MVNETA_ACC_MODE, val); if (pp->bm_priv) mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr); /* Update val of portCfg register accordingly with all RxQueue types */ val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); mvreg_write(pp, MVNETA_PORT_CONFIG, val); val = 0; mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val); mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64); /* Build PORT_SDMA_CONFIG_REG */ val = 0; /* Default burst size */ val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16); val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16); val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP; #if defined(__BIG_ENDIAN) val |= MVNETA_DESC_SWAP; #endif /* Assign port SDMA configuration */ mvreg_write(pp, MVNETA_SDMA_CONFIG, val); /* Disable PHY polling in hardware, since we're using the * kernel phylib to do this. */ val = mvreg_read(pp, MVNETA_UNIT_CONTROL); val &= ~MVNETA_PHY_POLLING_ENABLE; mvreg_write(pp, MVNETA_UNIT_CONTROL, val); mvneta_set_ucast_table(pp, -1); mvneta_set_special_mcast_table(pp, -1); mvneta_set_other_mcast_table(pp, -1); /* Set port interrupt enable register - default enable all */ mvreg_write(pp, MVNETA_INTR_ENABLE, (MVNETA_RXQ_INTR_ENABLE_ALL_MASK | MVNETA_TXQ_INTR_ENABLE_ALL_MASK)); mvneta_mib_counters_clear(pp); } /* Set max sizes for tx queues */ static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size) { u32 val, size, mtu; int queue; mtu = max_tx_size * 8; if (mtu > MVNETA_TX_MTU_MAX) mtu = MVNETA_TX_MTU_MAX; /* Set MTU */ val = mvreg_read(pp, MVNETA_TX_MTU); val &= ~MVNETA_TX_MTU_MAX; val |= mtu; mvreg_write(pp, MVNETA_TX_MTU, val); /* TX token size and all TXQs token size must be larger that MTU */ val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE); size = val & MVNETA_TX_TOKEN_SIZE_MAX; if (size < mtu) { size = mtu; val &= ~MVNETA_TX_TOKEN_SIZE_MAX; val |= size; mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val); } for (queue = 0; queue < txq_number; queue++) { val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue)); size = val & MVNETA_TXQ_TOKEN_SIZE_MAX; if (size < mtu) { size = mtu; val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX; val |= size; mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val); } } } /* Set unicast address */ static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble, int queue) { unsigned int unicast_reg; unsigned int tbl_offset; unsigned int reg_offset; /* Locate the Unicast table entry */ last_nibble = (0xf & last_nibble); /* offset from unicast tbl base */ tbl_offset = (last_nibble / 4) * 4; /* offset within the above reg */ reg_offset = last_nibble % 4; unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset)); if (queue == -1) { 
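/* Worked example of the table addressing computed above (illustrative):
 * for a MAC address whose last nibble is 0xb, tbl_offset = (0xb / 4) * 4 = 8
 * and reg_offset = 0xb % 4 = 3, so the entry lives in byte 3 of the register
 * at MVNETA_DA_FILT_UCAST_BASE + 8; bit 0 of that byte is the "accept" flag
 * and bits 3:1 hold the destination queue.
 */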
/* Clear accepts frame bit at specified unicast DA tbl entry */ unicast_reg &= ~(0xff << (8 * reg_offset)); } else { unicast_reg &= ~(0xff << (8 * reg_offset)); unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); } mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg); } /* Set mac address */ static void mvneta_mac_addr_set(struct mvneta_port *pp, const unsigned char *addr, int queue) { unsigned int mac_h; unsigned int mac_l; if (queue != -1) { mac_l = (addr[4] << 8) | (addr[5]); mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | (addr[3] << 0); mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l); mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h); } /* Accept frames of this address */ mvneta_set_ucast_addr(pp, addr[5], queue); } /* Set the number of packets that will be received before RX interrupt * will be generated by HW. */ static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, u32 value) { mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), value | MVNETA_RXQ_NON_OCCUPIED(0)); } /* Set the time delay in usec before RX interrupt will be generated by * HW. */ static void mvneta_rx_time_coal_set(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, u32 value) { u32 val; unsigned long clk_rate; clk_rate = clk_get_rate(pp->clk); val = (clk_rate / 1000000) * value; mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); } /* Set threshold for TX_DONE pkts coalescing */ static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp, struct mvneta_tx_queue *txq, u32 value) { u32 val; val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id)); val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK; val |= MVNETA_TXQ_SENT_THRESH_MASK(value); mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); } /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */ static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc, u32 phys_addr, void *virt_addr, struct mvneta_rx_queue *rxq) { int i; rx_desc->buf_phys_addr = phys_addr; i = rx_desc - rxq->descs; rxq->buf_virt_addr[i] = virt_addr; } /* Decrement sent descriptors counter */ static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp, struct mvneta_tx_queue *txq, int sent_desc) { u32 val; /* Only 255 TX descriptors can be updated at once */ while (sent_desc > 0xff) { val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT; mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); sent_desc = sent_desc - 0xff; } val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT; mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); } /* Get number of TX descriptors already sent by HW */ static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { u32 val; int sent_desc; val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id)); sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >> MVNETA_TXQ_SENT_DESC_SHIFT; return sent_desc; } /* Get number of sent descriptors and decrement counter. * The number of sent descriptors is returned. 
*/ static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { int sent_desc; /* Get number of sent descriptors */ sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); /* Decrement sent descriptors counter */ if (sent_desc) mvneta_txq_sent_desc_dec(pp, txq, sent_desc); return sent_desc; } /* Set TXQ descriptors fields relevant for CSUM calculation */ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto, int ip_hdr_len, int l4_proto) { u32 command; /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, * G_L4_chk, L4_type; required only for checksum * calculation */ command = l3_offs << MVNETA_TX_L3_OFF_SHIFT; command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT; if (l3_proto == htons(ETH_P_IP)) command |= MVNETA_TXD_IP_CSUM; else command |= MVNETA_TX_L3_IP6; if (l4_proto == IPPROTO_TCP) command |= MVNETA_TX_L4_CSUM_FULL; else if (l4_proto == IPPROTO_UDP) command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL; else command |= MVNETA_TX_L4_CSUM_NOT; return command; } /* Display more error info */ static void mvneta_rx_error(struct mvneta_port *pp, struct mvneta_rx_desc *rx_desc) { struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); u32 status = rx_desc->status; /* update per-cpu counter */ u64_stats_update_begin(&stats->syncp); stats->rx_errors++; u64_stats_update_end(&stats->syncp); switch (status & MVNETA_RXD_ERR_CODE_MASK) { case MVNETA_RXD_ERR_CRC: netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n", status, rx_desc->data_size); break; case MVNETA_RXD_ERR_OVERRUN: netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n", status, rx_desc->data_size); break; case MVNETA_RXD_ERR_LEN: netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n", status, rx_desc->data_size); break; case MVNETA_RXD_ERR_RESOURCE: netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n", status, rx_desc->data_size); break; } } /* Handle RX checksum offload based on the descriptor's status */ static int mvneta_rx_csum(struct mvneta_port *pp, u32 status) { if ((pp->dev->features & NETIF_F_RXCSUM) && (status & MVNETA_RXD_L3_IP4) && (status & MVNETA_RXD_L4_CSUM_OK)) return CHECKSUM_UNNECESSARY; return CHECKSUM_NONE; } /* Return tx queue pointer (find last set bit) according to <cause> returned * form tx_done reg. <cause> must not be null. The return value is always a * valid queue for matching the first one found in <cause>. 
*/ static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, u32 cause) { int queue = fls(cause) - 1; return &pp->txqs[queue]; } /* Free tx queue skbuffs */ static void mvneta_txq_bufs_free(struct mvneta_port *pp, struct mvneta_tx_queue *txq, int num, struct netdev_queue *nq, bool napi) { unsigned int bytes_compl = 0, pkts_compl = 0; struct xdp_frame_bulk bq; int i; xdp_frame_bulk_init(&bq); rcu_read_lock(); /* need for xdp_return_frame_bulk */ for (i = 0; i < num; i++) { struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index]; struct mvneta_tx_desc *tx_desc = txq->descs + txq->txq_get_index; mvneta_txq_inc_get(txq); if (buf->type == MVNETA_TYPE_XDP_NDO || buf->type == MVNETA_TYPE_SKB) dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr, tx_desc->data_size, DMA_TO_DEVICE); if ((buf->type == MVNETA_TYPE_TSO || buf->type == MVNETA_TYPE_SKB) && buf->skb) { bytes_compl += buf->skb->len; pkts_compl++; dev_kfree_skb_any(buf->skb); } else if ((buf->type == MVNETA_TYPE_XDP_TX || buf->type == MVNETA_TYPE_XDP_NDO) && buf->xdpf) { if (napi && buf->type == MVNETA_TYPE_XDP_TX) xdp_return_frame_rx_napi(buf->xdpf); else xdp_return_frame_bulk(buf->xdpf, &bq); } } xdp_flush_frame_bulk(&bq); rcu_read_unlock(); netdev_tx_completed_queue(nq, pkts_compl, bytes_compl); } /* Handle end of transmission */ static void mvneta_txq_done(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); int tx_done; tx_done = mvneta_txq_sent_desc_proc(pp, txq); if (!tx_done) return; mvneta_txq_bufs_free(pp, txq, tx_done, nq, true); txq->count -= tx_done; if (netif_tx_queue_stopped(nq)) { if (txq->count <= txq->tx_wake_threshold) netif_tx_wake_queue(nq); } } /* Refill processing for SW buffer management */ /* Allocate page per descriptor */ static int mvneta_rx_refill(struct mvneta_port *pp, struct mvneta_rx_desc *rx_desc, struct mvneta_rx_queue *rxq, gfp_t gfp_mask) { dma_addr_t phys_addr; struct page *page; page = page_pool_alloc_pages(rxq->page_pool, gfp_mask | __GFP_NOWARN); if (!page) return -ENOMEM; phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction; mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq); return 0; } /* Handle tx checksum */ static u32 mvneta_skb_tx_csum(struct sk_buff *skb) { if (skb->ip_summed == CHECKSUM_PARTIAL) { int ip_hdr_len = 0; __be16 l3_proto = vlan_get_protocol(skb); u8 l4_proto; if (l3_proto == htons(ETH_P_IP)) { struct iphdr *ip4h = ip_hdr(skb); /* Calculate IPv4 checksum and L4 checksum */ ip_hdr_len = ip4h->ihl; l4_proto = ip4h->protocol; } else if (l3_proto == htons(ETH_P_IPV6)) { struct ipv6hdr *ip6h = ipv6_hdr(skb); /* Read l4_protocol from one of IPv6 extra headers */ if (skb_network_header_len(skb) > 0) ip_hdr_len = (skb_network_header_len(skb) >> 2); l4_proto = ip6h->nexthdr; } else return MVNETA_TX_L4_CSUM_NOT; return mvneta_txq_desc_csum(skb_network_offset(skb), l3_proto, ip_hdr_len, l4_proto); } return MVNETA_TX_L4_CSUM_NOT; } /* Drop packets received by the RXQ and free buffers */ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) { int rx_done, i; rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); if (rx_done) mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); if (pp->bm_priv) { for (i = 0; i < rx_done; i++) { struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); struct mvneta_bm_pool *bm_pool; bm_pool = &pp->bm_priv->bm_pools[pool_id]; /* Return dropped buffer to the pool */ 
mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, rx_desc->buf_phys_addr); } return; } for (i = 0; i < rxq->size; i++) { struct mvneta_rx_desc *rx_desc = rxq->descs + i; void *data = rxq->buf_virt_addr[i]; if (!data || !(rx_desc->buf_phys_addr)) continue; page_pool_put_full_page(rxq->page_pool, data, false); } if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) xdp_rxq_info_unreg(&rxq->xdp_rxq); page_pool_destroy(rxq->page_pool); rxq->page_pool = NULL; } static void mvneta_update_stats(struct mvneta_port *pp, struct mvneta_stats *ps) { struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); u64_stats_update_begin(&stats->syncp); stats->es.ps.rx_packets += ps->rx_packets; stats->es.ps.rx_bytes += ps->rx_bytes; /* xdp */ stats->es.ps.xdp_redirect += ps->xdp_redirect; stats->es.ps.xdp_pass += ps->xdp_pass; stats->es.ps.xdp_drop += ps->xdp_drop; u64_stats_update_end(&stats->syncp); } static inline int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) { struct mvneta_rx_desc *rx_desc; int curr_desc = rxq->first_to_refill; int i; for (i = 0; (i < rxq->refill_num) && (i < 64); i++) { rx_desc = rxq->descs + curr_desc; if (!(rx_desc->buf_phys_addr)) { if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) { struct mvneta_pcpu_stats *stats; pr_err("Can't refill queue %d. Done %d from %d\n", rxq->id, i, rxq->refill_num); stats = this_cpu_ptr(pp->stats); u64_stats_update_begin(&stats->syncp); stats->es.refill_error++; u64_stats_update_end(&stats->syncp); break; } } curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc); } rxq->refill_num -= i; rxq->first_to_refill = curr_desc; return i; } static void mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, struct xdp_buff *xdp, int sync_len) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); int i; if (likely(!xdp_buff_has_frags(xdp))) goto out; for (i = 0; i < sinfo->nr_frags; i++) page_pool_put_full_page(rxq->page_pool, skb_frag_page(&sinfo->frags[i]), true); out: page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), sync_len, true); } static int mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq, struct xdp_frame *xdpf, int *nxmit_byte, bool dma_map) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); struct device *dev = pp->dev->dev.parent; struct mvneta_tx_desc *tx_desc; int i, num_frames = 1; struct page *page; if (unlikely(xdp_frame_has_frags(xdpf))) num_frames += sinfo->nr_frags; if (txq->count + num_frames >= txq->size) return MVNETA_XDP_DROPPED; for (i = 0; i < num_frames; i++) { struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; skb_frag_t *frag = NULL; int len = xdpf->len; dma_addr_t dma_addr; if (unlikely(i)) { /* paged area */ frag = &sinfo->frags[i - 1]; len = skb_frag_size(frag); } tx_desc = mvneta_txq_next_desc_get(txq); if (dma_map) { /* ndo_xdp_xmit */ void *data; data = unlikely(frag) ? skb_frag_address(frag) : xdpf->data; dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma_addr)) { mvneta_txq_desc_put(txq); goto unmap; } buf->type = MVNETA_TYPE_XDP_NDO; } else { page = unlikely(frag) ? skb_frag_page(frag) : virt_to_page(xdpf->data); dma_addr = page_pool_get_dma_addr(page); if (unlikely(frag)) dma_addr += skb_frag_off(frag); else dma_addr += sizeof(*xdpf) + xdpf->headroom; dma_sync_single_for_device(dev, dma_addr, len, DMA_BIDIRECTIONAL); buf->type = MVNETA_TYPE_XDP_TX; } buf->xdpf = unlikely(i) ? NULL : xdpf; tx_desc->command = unlikely(i) ? 
0 : MVNETA_TXD_F_DESC; tx_desc->buf_phys_addr = dma_addr; tx_desc->data_size = len; *nxmit_byte += len; mvneta_txq_inc_put(txq); } /*last descriptor */ tx_desc->command |= MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; txq->pending += num_frames; txq->count += num_frames; return MVNETA_XDP_TX; unmap: for (i--; i >= 0; i--) { mvneta_txq_desc_put(txq); tx_desc = txq->descs + txq->next_desc_to_proc; dma_unmap_single(dev, tx_desc->buf_phys_addr, tx_desc->data_size, DMA_TO_DEVICE); } return MVNETA_XDP_DROPPED; } static int mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp) { struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); struct mvneta_tx_queue *txq; struct netdev_queue *nq; int cpu, nxmit_byte = 0; struct xdp_frame *xdpf; u32 ret; xdpf = xdp_convert_buff_to_frame(xdp); if (unlikely(!xdpf)) return MVNETA_XDP_DROPPED; cpu = smp_processor_id(); txq = &pp->txqs[cpu % txq_number]; nq = netdev_get_tx_queue(pp->dev, txq->id); __netif_tx_lock(nq, cpu); ret = mvneta_xdp_submit_frame(pp, txq, xdpf, &nxmit_byte, false); if (ret == MVNETA_XDP_TX) { u64_stats_update_begin(&stats->syncp); stats->es.ps.tx_bytes += nxmit_byte; stats->es.ps.tx_packets++; stats->es.ps.xdp_tx++; u64_stats_update_end(&stats->syncp); mvneta_txq_pend_desc_add(pp, txq, 0); } else { u64_stats_update_begin(&stats->syncp); stats->es.ps.xdp_tx_err++; u64_stats_update_end(&stats->syncp); } __netif_tx_unlock(nq); return ret; } static int mvneta_xdp_xmit(struct net_device *dev, int num_frame, struct xdp_frame **frames, u32 flags) { struct mvneta_port *pp = netdev_priv(dev); struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); int i, nxmit_byte = 0, nxmit = 0; int cpu = smp_processor_id(); struct mvneta_tx_queue *txq; struct netdev_queue *nq; u32 ret; if (unlikely(test_bit(__MVNETA_DOWN, &pp->state))) return -ENETDOWN; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; txq = &pp->txqs[cpu % txq_number]; nq = netdev_get_tx_queue(pp->dev, txq->id); __netif_tx_lock(nq, cpu); for (i = 0; i < num_frame; i++) { ret = mvneta_xdp_submit_frame(pp, txq, frames[i], &nxmit_byte, true); if (ret != MVNETA_XDP_TX) break; nxmit++; } if (unlikely(flags & XDP_XMIT_FLUSH)) mvneta_txq_pend_desc_add(pp, txq, 0); __netif_tx_unlock(nq); u64_stats_update_begin(&stats->syncp); stats->es.ps.tx_bytes += nxmit_byte; stats->es.ps.tx_packets += nxmit; stats->es.ps.xdp_xmit += nxmit; stats->es.ps.xdp_xmit_err += num_frame - nxmit; u64_stats_update_end(&stats->syncp); return nxmit; } static int mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, struct bpf_prog *prog, struct xdp_buff *xdp, u32 frame_sz, struct mvneta_stats *stats) { unsigned int len, data_len, sync; u32 ret, act; len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; data_len = xdp->data_end - xdp->data; act = bpf_prog_run_xdp(prog, xdp); /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */ sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; sync = max(sync, len); switch (act) { case XDP_PASS: stats->xdp_pass++; return MVNETA_XDP_PASS; case XDP_REDIRECT: { int err; err = xdp_do_redirect(pp->dev, xdp, prog); if (unlikely(err)) { mvneta_xdp_put_buff(pp, rxq, xdp, sync); ret = MVNETA_XDP_DROPPED; } else { ret = MVNETA_XDP_REDIR; stats->xdp_redirect++; } break; } case XDP_TX: ret = mvneta_xdp_xmit_back(pp, xdp); if (ret != MVNETA_XDP_TX) mvneta_xdp_put_buff(pp, rxq, xdp, sync); break; default: bpf_warn_invalid_xdp_action(pp->dev, prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(pp->dev, prog, 
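/*
 * Editorial sketch, not part of the driver: how the XDP_TX / ndo_xdp_xmit
 * paths above pick a TX queue -- the current CPU id modulo the number of TX
 * queues, so concurrent CPUs spread across the queues and each takes the
 * matching netdev_queue lock. xdp_txq_index() is a hypothetical helper.
 */
#if 0
static unsigned int xdp_txq_index(unsigned int cpu, unsigned int nr_txqs)
{
        return cpu % nr_txqs;   /* same mapping as txqs[cpu % txq_number] */
}
#endif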
act); fallthrough; case XDP_DROP: mvneta_xdp_put_buff(pp, rxq, xdp, sync); ret = MVNETA_XDP_DROPPED; stats->xdp_drop++; break; } stats->rx_bytes += frame_sz + xdp->data_end - xdp->data - data_len; stats->rx_packets++; return ret; } static void mvneta_swbm_rx_frame(struct mvneta_port *pp, struct mvneta_rx_desc *rx_desc, struct mvneta_rx_queue *rxq, struct xdp_buff *xdp, int *size, struct page *page) { unsigned char *data = page_address(page); int data_len = -MVNETA_MH_SIZE, len; struct net_device *dev = pp->dev; enum dma_data_direction dma_dir; if (*size > MVNETA_MAX_RX_BUF_SIZE) { len = MVNETA_MAX_RX_BUF_SIZE; data_len += len; } else { len = *size; data_len += len - ETH_FCS_LEN; } *size = *size - len; dma_dir = page_pool_get_dma_dir(rxq->page_pool); dma_sync_single_for_cpu(dev->dev.parent, rx_desc->buf_phys_addr, len, dma_dir); rx_desc->buf_phys_addr = 0; /* Prefetch header */ prefetch(data); xdp_buff_clear_frags_flag(xdp); xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE, data_len, false); } static void mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, struct mvneta_rx_desc *rx_desc, struct mvneta_rx_queue *rxq, struct xdp_buff *xdp, int *size, struct page *page) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); struct net_device *dev = pp->dev; enum dma_data_direction dma_dir; int data_len, len; if (*size > MVNETA_MAX_RX_BUF_SIZE) { len = MVNETA_MAX_RX_BUF_SIZE; data_len = len; } else { len = *size; data_len = len - ETH_FCS_LEN; } dma_dir = page_pool_get_dma_dir(rxq->page_pool); dma_sync_single_for_cpu(dev->dev.parent, rx_desc->buf_phys_addr, len, dma_dir); rx_desc->buf_phys_addr = 0; if (!xdp_buff_has_frags(xdp)) sinfo->nr_frags = 0; if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) { skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++]; skb_frag_fill_page_desc(frag, page, pp->rx_offset_correction, data_len); if (!xdp_buff_has_frags(xdp)) { sinfo->xdp_frags_size = *size; xdp_buff_set_frags_flag(xdp); } if (page_is_pfmemalloc(page)) xdp_buff_set_frag_pfmemalloc(xdp); } else { page_pool_put_full_page(rxq->page_pool, page, true); } *size -= len; } static struct sk_buff * mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool, struct xdp_buff *xdp, u32 desc_status) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); struct sk_buff *skb; u8 num_frags; if (unlikely(xdp_buff_has_frags(xdp))) num_frags = sinfo->nr_frags; skb = build_skb(xdp->data_hard_start, PAGE_SIZE); if (!skb) return ERR_PTR(-ENOMEM); skb_mark_for_recycle(skb); skb_reserve(skb, xdp->data - xdp->data_hard_start); skb_put(skb, xdp->data_end - xdp->data); skb->ip_summed = mvneta_rx_csum(pp, desc_status); if (unlikely(xdp_buff_has_frags(xdp))) xdp_update_skb_shared_info(skb, num_frags, sinfo->xdp_frags_size, num_frags * xdp->frame_sz, xdp_buff_is_frag_pfmemalloc(xdp)); return skb; } /* Main rx processing when using software buffer management */ static int mvneta_rx_swbm(struct napi_struct *napi, struct mvneta_port *pp, int budget, struct mvneta_rx_queue *rxq) { int rx_proc = 0, rx_todo, refill, size = 0; struct net_device *dev = pp->dev; struct mvneta_stats ps = {}; struct bpf_prog *xdp_prog; u32 desc_status, frame_sz; struct xdp_buff xdp_buf; xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq); xdp_buf.data_hard_start = NULL; /* Get number of received packets */ rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); xdp_prog = READ_ONCE(pp->xdp_prog); /* Fairness NAPI loop */ while (rx_proc < budget && rx_proc < rx_todo) { struct mvneta_rx_desc *rx_desc = 
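/*
 * Editorial sketch, not part of the driver: the length bookkeeping done by
 * mvneta_swbm_rx_frame() above. The Marvell header is always skipped, and the
 * Ethernet FCS is only stripped when the frame fits in a single buffer
 * (otherwise the FCS sits in a later fragment). Names and the void-return
 * shape are hypothetical.
 */
#if 0
static void first_buf_lengths(int remaining, int max_buf, int mh_size,
                              int fcs_len, int *hw_len, int *data_len)
{
        *data_len = -mh_size;                   /* drop the Marvell header */
        if (remaining > max_buf) {
                *hw_len = max_buf;              /* FCS arrives in a later buffer */
                *data_len += *hw_len;
        } else {
                *hw_len = remaining;
                *data_len += *hw_len - fcs_len; /* strip the trailing FCS */
        }
}
#endif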
mvneta_rxq_next_desc_get(rxq); u32 rx_status, index; struct sk_buff *skb; struct page *page; index = rx_desc - rxq->descs; page = (struct page *)rxq->buf_virt_addr[index]; rx_status = rx_desc->status; rx_proc++; rxq->refill_num++; if (rx_status & MVNETA_RXD_FIRST_DESC) { /* Check errors only for FIRST descriptor */ if (rx_status & MVNETA_RXD_ERR_SUMMARY) { mvneta_rx_error(pp, rx_desc); goto next; } size = rx_desc->data_size; frame_sz = size - ETH_FCS_LEN; desc_status = rx_status; mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf, &size, page); } else { if (unlikely(!xdp_buf.data_hard_start)) { rx_desc->buf_phys_addr = 0; page_pool_put_full_page(rxq->page_pool, page, true); goto next; } mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf, &size, page); } /* Middle or Last descriptor */ if (!(rx_status & MVNETA_RXD_LAST_DESC)) /* no last descriptor this time */ continue; if (size) { mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1); goto next; } if (xdp_prog && mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps)) goto next; skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status); if (IS_ERR(skb)) { struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1); u64_stats_update_begin(&stats->syncp); stats->es.skb_alloc_error++; stats->rx_dropped++; u64_stats_update_end(&stats->syncp); goto next; } ps.rx_bytes += skb->len; ps.rx_packets++; skb->protocol = eth_type_trans(skb, dev); napi_gro_receive(napi, skb); next: xdp_buf.data_hard_start = NULL; } if (xdp_buf.data_hard_start) mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1); if (ps.xdp_redirect) xdp_do_flush_map(); if (ps.rx_packets) mvneta_update_stats(pp, &ps); /* return some buffers to hardware queue, one at a time is too slow */ refill = mvneta_rx_refill_queue(pp, rxq); /* Update rxq management counters */ mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill); return ps.rx_packets; } /* Main rx processing when using hardware buffer management */ static int mvneta_rx_hwbm(struct napi_struct *napi, struct mvneta_port *pp, int rx_todo, struct mvneta_rx_queue *rxq) { struct net_device *dev = pp->dev; int rx_done; u32 rcvd_pkts = 0; u32 rcvd_bytes = 0; /* Get number of received packets */ rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); if (rx_todo > rx_done) rx_todo = rx_done; rx_done = 0; /* Fairness NAPI loop */ while (rx_done < rx_todo) { struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); struct mvneta_bm_pool *bm_pool = NULL; struct sk_buff *skb; unsigned char *data; dma_addr_t phys_addr; u32 rx_status, frag_size; int rx_bytes, err; u8 pool_id; rx_done++; rx_status = rx_desc->status; rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); data = (u8 *)(uintptr_t)rx_desc->buf_cookie; phys_addr = rx_desc->buf_phys_addr; pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); bm_pool = &pp->bm_priv->bm_pools[pool_id]; if (!mvneta_rxq_desc_is_first_last(rx_status) || (rx_status & MVNETA_RXD_ERR_SUMMARY)) { err_drop_frame_ret_pool: /* Return the buffer to the pool */ mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, rx_desc->buf_phys_addr); err_drop_frame: mvneta_rx_error(pp, rx_desc); /* leave the descriptor untouched */ continue; } if (rx_bytes <= rx_copybreak) { /* better copy a small frame and not unmap the DMA region */ skb = netdev_alloc_skb_ip_align(dev, rx_bytes); if (unlikely(!skb)) goto err_drop_frame_ret_pool; dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev, rx_desc->buf_phys_addr, MVNETA_MH_SIZE + NET_SKB_PAD, rx_bytes, DMA_FROM_DEVICE); skb_put_data(skb, data + 
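/*
 * Editorial sketch, not part of the driver: the copy-vs-build decision taken
 * by mvneta_rx_hwbm() right around this point. Small frames are copied into a
 * fresh skb so the DMA buffer can go straight back to the pool; larger frames
 * are wrapped with build_skb() after unmapping. Names are hypothetical; the
 * payload math mirrors rx_desc->data_size - (FCS + Marvell header).
 */
#if 0
#include <stdbool.h>

static int hwbm_payload_len(int desc_data_size, int fcs_len, int mh_size)
{
        return desc_data_size - (fcs_len + mh_size);
}

static bool hwbm_should_copy(int payload_len, int copybreak)
{
        return payload_len <= copybreak;   /* copy small frames, recycle buffer */
}
#endif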
MVNETA_MH_SIZE + NET_SKB_PAD, rx_bytes); skb->protocol = eth_type_trans(skb, dev); skb->ip_summed = mvneta_rx_csum(pp, rx_status); napi_gro_receive(napi, skb); rcvd_pkts++; rcvd_bytes += rx_bytes; /* Return the buffer to the pool */ mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, rx_desc->buf_phys_addr); /* leave the descriptor and buffer untouched */ continue; } /* Refill processing */ err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC); if (err) { struct mvneta_pcpu_stats *stats; netdev_err(dev, "Linux processing - Can't refill\n"); stats = this_cpu_ptr(pp->stats); u64_stats_update_begin(&stats->syncp); stats->es.refill_error++; u64_stats_update_end(&stats->syncp); goto err_drop_frame_ret_pool; } frag_size = bm_pool->hwbm_pool.frag_size; skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size); /* After refill old buffer has to be unmapped regardless * the skb is successfully built or not. */ dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr, bm_pool->buf_size, DMA_FROM_DEVICE); if (!skb) goto err_drop_frame; rcvd_pkts++; rcvd_bytes += rx_bytes; /* Linux processing */ skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD); skb_put(skb, rx_bytes); skb->protocol = eth_type_trans(skb, dev); skb->ip_summed = mvneta_rx_csum(pp, rx_status); napi_gro_receive(napi, skb); } if (rcvd_pkts) { struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); u64_stats_update_begin(&stats->syncp); stats->es.ps.rx_packets += rcvd_pkts; stats->es.ps.rx_bytes += rcvd_bytes; u64_stats_update_end(&stats->syncp); } /* Update rxq management counters */ mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); return rx_done; } static void mvneta_free_tso_hdrs(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { struct device *dev = pp->dev->dev.parent; int i; for (i = 0; i < MVNETA_MAX_TSO_PAGES; i++) { if (txq->tso_hdrs[i]) { dma_free_coherent(dev, MVNETA_TSO_PAGE_SIZE, txq->tso_hdrs[i], txq->tso_hdrs_phys[i]); txq->tso_hdrs[i] = NULL; } } } static int mvneta_alloc_tso_hdrs(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { struct device *dev = pp->dev->dev.parent; int i, num; num = DIV_ROUND_UP(txq->size, MVNETA_TSO_PER_PAGE); for (i = 0; i < num; i++) { txq->tso_hdrs[i] = dma_alloc_coherent(dev, MVNETA_TSO_PAGE_SIZE, &txq->tso_hdrs_phys[i], GFP_KERNEL); if (!txq->tso_hdrs[i]) { mvneta_free_tso_hdrs(pp, txq); return -ENOMEM; } } return 0; } static char *mvneta_get_tso_hdr(struct mvneta_tx_queue *txq, dma_addr_t *dma) { int index, offset; index = txq->txq_put_index / MVNETA_TSO_PER_PAGE; offset = (txq->txq_put_index % MVNETA_TSO_PER_PAGE) * TSO_HEADER_SIZE; *dma = txq->tso_hdrs_phys[index] + offset; return txq->tso_hdrs[index] + offset; } static void mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq, struct tso_t *tso, int size, bool is_last) { struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; int hdr_len = skb_tcp_all_headers(skb); struct mvneta_tx_desc *tx_desc; dma_addr_t hdr_phys; char *hdr; hdr = mvneta_get_tso_hdr(txq, &hdr_phys); tso_build_hdr(skb, hdr, tso, size, is_last); tx_desc = mvneta_txq_next_desc_get(txq); tx_desc->data_size = hdr_len; tx_desc->command = mvneta_skb_tx_csum(skb); tx_desc->command |= MVNETA_TXD_F_DESC; tx_desc->buf_phys_addr = hdr_phys; buf->type = MVNETA_TYPE_TSO; buf->skb = NULL; mvneta_txq_inc_put(txq); } static inline int mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, struct sk_buff *skb, char *data, int size, bool last_tcp, bool is_last) { struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; struct mvneta_tx_desc 
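/*
 * Editorial sketch, not part of the driver: the slot arithmetic used by
 * mvneta_get_tso_hdr() above. TSO headers live in per-queue DMA pages, each
 * holding MVNETA_TSO_PER_PAGE headers of TSO_HEADER_SIZE bytes; the put index
 * selects the page and the byte offset inside it. tso_hdr_slot() is a
 * hypothetical helper.
 */
#if 0
static void tso_hdr_slot(int put_index, int hdrs_per_page, int hdr_size,
                         int *page_index, int *byte_offset)
{
        *page_index = put_index / hdrs_per_page;
        *byte_offset = (put_index % hdrs_per_page) * hdr_size;
}
#endif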
*tx_desc; tx_desc = mvneta_txq_next_desc_get(txq); tx_desc->data_size = size; tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data, size, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev->dev.parent, tx_desc->buf_phys_addr))) { mvneta_txq_desc_put(txq); return -ENOMEM; } tx_desc->command = 0; buf->type = MVNETA_TYPE_SKB; buf->skb = NULL; if (last_tcp) { /* last descriptor in the TCP packet */ tx_desc->command = MVNETA_TXD_L_DESC; /* last descriptor in SKB */ if (is_last) buf->skb = skb; } mvneta_txq_inc_put(txq); return 0; } static void mvneta_release_descs(struct mvneta_port *pp, struct mvneta_tx_queue *txq, int first, int num) { int desc_idx, i; desc_idx = first + num; if (desc_idx >= txq->size) desc_idx -= txq->size; for (i = num; i >= 0; i--) { struct mvneta_tx_desc *tx_desc = txq->descs + desc_idx; struct mvneta_tx_buf *buf = &txq->buf[desc_idx]; if (buf->type == MVNETA_TYPE_SKB) dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr, tx_desc->data_size, DMA_TO_DEVICE); mvneta_txq_desc_put(txq); if (desc_idx == 0) desc_idx = txq->size; desc_idx -= 1; } } static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev, struct mvneta_tx_queue *txq) { int hdr_len, total_len, data_left; int first_desc, desc_count = 0; struct mvneta_port *pp = netdev_priv(dev); struct tso_t tso; /* Count needed descriptors */ if ((txq->count + tso_count_descs(skb)) >= txq->size) return 0; if (skb_headlen(skb) < skb_tcp_all_headers(skb)) { pr_info("*** Is this even possible?\n"); return 0; } first_desc = txq->txq_put_index; /* Initialize the TSO handler, and prepare the first payload */ hdr_len = tso_start(skb, &tso); total_len = skb->len - hdr_len; while (total_len > 0) { data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); total_len -= data_left; desc_count++; /* prepare packet headers: MAC + IP + TCP */ mvneta_tso_put_hdr(skb, txq, &tso, data_left, total_len == 0); while (data_left > 0) { int size; desc_count++; size = min_t(int, tso.size, data_left); if (mvneta_tso_put_data(dev, txq, skb, tso.data, size, size == data_left, total_len == 0)) goto err_release; data_left -= size; tso_build_data(skb, &tso, size); } } return desc_count; err_release: /* Release all used data descriptors; header descriptors must not * be DMA-unmapped. 
*/ mvneta_release_descs(pp, txq, first_desc, desc_count - 1); return 0; } /* Handle tx fragmentation processing */ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, struct mvneta_tx_queue *txq) { struct mvneta_tx_desc *tx_desc; int i, nr_frags = skb_shinfo(skb)->nr_frags; int first_desc = txq->txq_put_index; for (i = 0; i < nr_frags; i++) { struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; void *addr = skb_frag_address(frag); tx_desc = mvneta_txq_next_desc_get(txq); tx_desc->data_size = skb_frag_size(frag); tx_desc->buf_phys_addr = dma_map_single(pp->dev->dev.parent, addr, tx_desc->data_size, DMA_TO_DEVICE); if (dma_mapping_error(pp->dev->dev.parent, tx_desc->buf_phys_addr)) { mvneta_txq_desc_put(txq); goto error; } if (i == nr_frags - 1) { /* Last descriptor */ tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; buf->skb = skb; } else { /* Descriptor in the middle: Not First, Not Last */ tx_desc->command = 0; buf->skb = NULL; } buf->type = MVNETA_TYPE_SKB; mvneta_txq_inc_put(txq); } return 0; error: /* Release all descriptors that were used to map fragments of * this packet, as well as the corresponding DMA mappings */ mvneta_release_descs(pp, txq, first_desc, i - 1); return -ENOMEM; } /* Main tx processing */ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); u16 txq_id = skb_get_queue_mapping(skb); struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; struct mvneta_tx_desc *tx_desc; int len = skb->len; int frags = 0; u32 tx_cmd; if (!netif_running(dev)) goto out; if (skb_is_gso(skb)) { frags = mvneta_tx_tso(skb, dev, txq); goto out; } frags = skb_shinfo(skb)->nr_frags + 1; /* Get a descriptor for the first part of the packet */ tx_desc = mvneta_txq_next_desc_get(txq); tx_cmd = mvneta_skb_tx_csum(skb); tx_desc->data_size = skb_headlen(skb); tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data, tx_desc->data_size, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev->dev.parent, tx_desc->buf_phys_addr))) { mvneta_txq_desc_put(txq); frags = 0; goto out; } buf->type = MVNETA_TYPE_SKB; if (frags == 1) { /* First and Last descriptor */ tx_cmd |= MVNETA_TXD_FLZ_DESC; tx_desc->command = tx_cmd; buf->skb = skb; mvneta_txq_inc_put(txq); } else { /* First but not Last */ tx_cmd |= MVNETA_TXD_F_DESC; buf->skb = NULL; mvneta_txq_inc_put(txq); tx_desc->command = tx_cmd; /* Continue with other skb fragments */ if (mvneta_tx_frag_process(pp, skb, txq)) { dma_unmap_single(dev->dev.parent, tx_desc->buf_phys_addr, tx_desc->data_size, DMA_TO_DEVICE); mvneta_txq_desc_put(txq); frags = 0; goto out; } } out: if (frags > 0) { struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); netdev_tx_sent_queue(nq, len); txq->count += frags; if (txq->count >= txq->tx_stop_threshold) netif_tx_stop_queue(nq); if (!netdev_xmit_more() || netif_xmit_stopped(nq) || txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK) mvneta_txq_pend_desc_add(pp, txq, frags); else txq->pending += frags; u64_stats_update_begin(&stats->syncp); stats->es.ps.tx_bytes += len; stats->es.ps.tx_packets++; u64_stats_update_end(&stats->syncp); } else { dev->stats.tx_dropped++; dev_kfree_skb_any(skb); } return NETDEV_TX_OK; } /* Free tx resources, when resetting a port */ static void mvneta_txq_done_force(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { struct 
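/*
 * Editorial sketch, not part of the driver: how the plain-skb xmit paths
 * above tag descriptors. A single-descriptor packet gets First+Last (plus
 * zero padding), the head of a multi-descriptor packet gets First only, the
 * tail gets Last, and middle descriptors get neither. The bit values below
 * are illustrative placeholders, not the real MVNETA_TXD_* encoding.
 */
#if 0
#define EX_TXD_FIRST    0x1
#define EX_TXD_LAST     0x2
#define EX_TXD_ZPAD     0x4

static unsigned int txd_flags(int is_first, int is_last)
{
        unsigned int cmd = 0;

        if (is_first)
                cmd |= EX_TXD_FIRST;
        if (is_last)
                cmd |= EX_TXD_LAST | EX_TXD_ZPAD;
        return cmd;
}
#endif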
netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); int tx_done = txq->count; mvneta_txq_bufs_free(pp, txq, tx_done, nq, false); /* reset txq */ txq->count = 0; txq->txq_put_index = 0; txq->txq_get_index = 0; } /* Handle tx done - called in softirq context. The <cause_tx_done> argument * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL. */ static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done) { struct mvneta_tx_queue *txq; struct netdev_queue *nq; int cpu = smp_processor_id(); while (cause_tx_done) { txq = mvneta_tx_done_policy(pp, cause_tx_done); nq = netdev_get_tx_queue(pp->dev, txq->id); __netif_tx_lock(nq, cpu); if (txq->count) mvneta_txq_done(pp, txq); __netif_tx_unlock(nq); cause_tx_done &= ~((1 << txq->id)); } } /* Compute crc8 of the specified address, using a unique algorithm , * according to hw spec, different than generic crc8 algorithm */ static int mvneta_addr_crc(unsigned char *addr) { int crc = 0; int i; for (i = 0; i < ETH_ALEN; i++) { int j; crc = (crc ^ addr[i]) << 8; for (j = 7; j >= 0; j--) { if (crc & (0x100 << j)) crc ^= 0x107 << j; } } return crc; } /* This method controls the net device special MAC multicast support. * The Special Multicast Table for MAC addresses supports MAC of the form * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). * The MAC DA[7:0] bits are used as a pointer to the Special Multicast * Table entries in the DA-Filter table. This method set the Special * Multicast Table appropriate entry. */ static void mvneta_set_special_mcast_addr(struct mvneta_port *pp, unsigned char last_byte, int queue) { unsigned int smc_table_reg; unsigned int tbl_offset; unsigned int reg_offset; /* Register offset from SMC table base */ tbl_offset = (last_byte / 4); /* Entry offset within the above reg */ reg_offset = last_byte % 4; smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4)); if (queue == -1) smc_table_reg &= ~(0xff << (8 * reg_offset)); else { smc_table_reg &= ~(0xff << (8 * reg_offset)); smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); } mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4, smc_table_reg); } /* This method controls the network device Other MAC multicast support. * The Other Multicast Table is used for multicast of another type. * A CRC-8 is used as an index to the Other Multicast Table entries * in the DA-Filter table. * The method gets the CRC-8 value from the calling routine and * sets the Other Multicast Table appropriate entry according to the * specified CRC-8 . */ static void mvneta_set_other_mcast_addr(struct mvneta_port *pp, unsigned char crc8, int queue) { unsigned int omc_table_reg; unsigned int tbl_offset; unsigned int reg_offset; tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */ reg_offset = crc8 % 4; /* Entry offset within the above reg */ omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset); if (queue == -1) { /* Clear accepts frame bit at specified Other DA table entry */ omc_table_reg &= ~(0xff << (8 * reg_offset)); } else { omc_table_reg &= ~(0xff << (8 * reg_offset)); omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); } mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg); } /* The network device supports multicast using two tables: * 1) Special Multicast Table for MAC addresses of the form * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). * The MAC DA[7:0] bits are used as a pointer to the Special Multicast * Table entries in the DA-Filter table. 
* 2) Other Multicast Table for multicast of another type. A CRC-8 value * is used as an index to the Other Multicast Table entries in the * DA-Filter table. */ static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr, int queue) { unsigned char crc_result = 0; if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) { mvneta_set_special_mcast_addr(pp, p_addr[5], queue); return 0; } crc_result = mvneta_addr_crc(p_addr); if (queue == -1) { if (pp->mcast_count[crc_result] == 0) { netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n", crc_result); return -EINVAL; } pp->mcast_count[crc_result]--; if (pp->mcast_count[crc_result] != 0) { netdev_info(pp->dev, "After delete there are %d valid Mcast for crc8=0x%02x\n", pp->mcast_count[crc_result], crc_result); return -EINVAL; } } else pp->mcast_count[crc_result]++; mvneta_set_other_mcast_addr(pp, crc_result, queue); return 0; } /* Configure Fitering mode of Ethernet port */ static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp, int is_promisc) { u32 port_cfg_reg, val; port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG); val = mvreg_read(pp, MVNETA_TYPE_PRIO); /* Set / Clear UPM bit in port configuration register */ if (is_promisc) { /* Accept all Unicast addresses */ port_cfg_reg |= MVNETA_UNI_PROMISC_MODE; val |= MVNETA_FORCE_UNI; mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff); mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff); } else { /* Reject all Unicast addresses */ port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE; val &= ~MVNETA_FORCE_UNI; } mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg); mvreg_write(pp, MVNETA_TYPE_PRIO, val); } /* register unicast and multicast addresses */ static void mvneta_set_rx_mode(struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); struct netdev_hw_addr *ha; if (dev->flags & IFF_PROMISC) { /* Accept all: Multicast + Unicast */ mvneta_rx_unicast_promisc_set(pp, 1); mvneta_set_ucast_table(pp, pp->rxq_def); mvneta_set_special_mcast_table(pp, pp->rxq_def); mvneta_set_other_mcast_table(pp, pp->rxq_def); } else { /* Accept single Unicast */ mvneta_rx_unicast_promisc_set(pp, 0); mvneta_set_ucast_table(pp, -1); mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def); if (dev->flags & IFF_ALLMULTI) { /* Accept all multicast */ mvneta_set_special_mcast_table(pp, pp->rxq_def); mvneta_set_other_mcast_table(pp, pp->rxq_def); } else { /* Accept only initialized multicast */ mvneta_set_special_mcast_table(pp, -1); mvneta_set_other_mcast_table(pp, -1); if (!netdev_mc_empty(dev)) { netdev_for_each_mc_addr(ha, dev) { mvneta_mcast_addr_set(pp, ha->addr, pp->rxq_def); } } } } } /* Interrupt handling - the callback for request_irq() */ static irqreturn_t mvneta_isr(int irq, void *dev_id) { struct mvneta_port *pp = (struct mvneta_port *)dev_id; mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); napi_schedule(&pp->napi); return IRQ_HANDLED; } /* Interrupt handling - the callback for request_percpu_irq() */ static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id) { struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id; disable_percpu_irq(port->pp->dev->irq); napi_schedule(&port->napi); return IRQ_HANDLED; } static void mvneta_link_change(struct mvneta_port *pp) { u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP)); } /* NAPI handler * Bits 0 - 7 of the causeRxTx register indicate that are transmitted * packets on the corresponding TXQ (Bit 0 is for TX queue 1). 
* Bits 8 -15 of the cause Rx Tx register indicate that are received * packets on the corresponding RXQ (Bit 8 is for RX queue 0). * Each CPU has its own causeRxTx register */ static int mvneta_poll(struct napi_struct *napi, int budget) { int rx_done = 0; u32 cause_rx_tx; int rx_queue; struct mvneta_port *pp = netdev_priv(napi->dev); struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); if (!netif_running(pp->dev)) { napi_complete(napi); return rx_done; } /* Read cause register */ cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE); if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) { u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE); mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE | MVNETA_CAUSE_LINK_CHANGE)) mvneta_link_change(pp); } /* Release Tx descriptors */ if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) { mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL)); cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL; } /* For the case where the last mvneta_poll did not process all * RX packets */ cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx : port->cause_rx_tx; rx_queue = fls(((cause_rx_tx >> 8) & 0xff)); if (rx_queue) { rx_queue = rx_queue - 1; if (pp->bm_priv) rx_done = mvneta_rx_hwbm(napi, pp, budget, &pp->rxqs[rx_queue]); else rx_done = mvneta_rx_swbm(napi, pp, budget, &pp->rxqs[rx_queue]); } if (rx_done < budget) { cause_rx_tx = 0; napi_complete_done(napi, rx_done); if (pp->neta_armada3700) { unsigned long flags; local_irq_save(flags); mvreg_write(pp, MVNETA_INTR_NEW_MASK, MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number) | MVNETA_MISCINTR_INTR_MASK); local_irq_restore(flags); } else { enable_percpu_irq(pp->dev->irq, 0); } } if (pp->neta_armada3700) pp->cause_rx_tx = cause_rx_tx; else port->cause_rx_tx = cause_rx_tx; return rx_done; } static int mvneta_create_page_pool(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, int size) { struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog); struct page_pool_params pp_params = { .order = 0, .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, .pool_size = size, .nid = NUMA_NO_NODE, .dev = pp->dev->dev.parent, .dma_dir = xdp_prog ? 
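/*
 * Editorial sketch, not part of the driver: how mvneta_poll() above turns the
 * cause register into an RX queue number. Bits 8..15 carry per-RXQ events;
 * the highest set bit wins, and zero means "no RX work". highest_bit() stands
 * in for the kernel's fls(); all names are hypothetical.
 */
#if 0
static int highest_bit(unsigned int x)  /* 1-based, 0 when x == 0 */
{
        int n = 0;

        while (x) {
                n++;
                x >>= 1;
        }
        return n;
}

static int rx_queue_from_cause(unsigned int cause_rx_tx)
{
        return highest_bit((cause_rx_tx >> 8) & 0xff) - 1;  /* -1: none */
}
#endif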
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, .offset = pp->rx_offset_correction, .max_len = MVNETA_MAX_RX_BUF_SIZE, }; int err; rxq->page_pool = page_pool_create(&pp_params); if (IS_ERR(rxq->page_pool)) { err = PTR_ERR(rxq->page_pool); rxq->page_pool = NULL; return err; } err = __xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0, PAGE_SIZE); if (err < 0) goto err_free_pp; err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, rxq->page_pool); if (err) goto err_unregister_rxq; return 0; err_unregister_rxq: xdp_rxq_info_unreg(&rxq->xdp_rxq); err_free_pp: page_pool_destroy(rxq->page_pool); rxq->page_pool = NULL; return err; } /* Handle rxq fill: allocates rxq skbs; called when initializing a port */ static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, int num) { int i, err; err = mvneta_create_page_pool(pp, rxq, num); if (err < 0) return err; for (i = 0; i < num; i++) { memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc)); if (mvneta_rx_refill(pp, rxq->descs + i, rxq, GFP_KERNEL) != 0) { netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n", __func__, rxq->id, i, num); break; } } /* Add this number of RX descriptors as non occupied (ready to * get packets) */ mvneta_rxq_non_occup_desc_add(pp, rxq, i); return i; } /* Free all packets pending transmit from all TXQs and reset TX port */ static void mvneta_tx_reset(struct mvneta_port *pp) { int queue; /* free the skb's in the tx ring */ for (queue = 0; queue < txq_number; queue++) mvneta_txq_done_force(pp, &pp->txqs[queue]); mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); } static void mvneta_rx_reset(struct mvneta_port *pp) { mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); } /* Rx/Tx queue initialization/cleanup methods */ static int mvneta_rxq_sw_init(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) { rxq->size = pp->rx_ring_size; /* Allocate memory for RX descriptors */ rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, rxq->size * MVNETA_DESC_ALIGNED_SIZE, &rxq->descs_phys, GFP_KERNEL); if (!rxq->descs) return -ENOMEM; rxq->last_desc = rxq->size - 1; return 0; } static void mvneta_rxq_hw_init(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) { /* Set Rx descriptors queue starting address */ mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); /* Set coalescing pkts and time */ mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); if (!pp->bm_priv) { /* Set Offset */ mvneta_rxq_offset_set(pp, rxq, 0); mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ? 
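/*
 * Editorial sketch, not part of the driver: the buffer-size choice being made
 * in the ternary this comment interrupts. On 4K/16K-page systems the queue is
 * programmed with the fixed page-derived maximum; on 64K pages it falls back
 * to a size derived from the packet size. pick_rx_buf_size() is hypothetical.
 */
#if 0
static int pick_rx_buf_size(unsigned long page_size, int max_page_buf,
                            int pkt_derived_buf)
{
        return page_size < (64UL * 1024) ? max_page_buf : pkt_derived_buf;
}
#endif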
MVNETA_MAX_RX_BUF_SIZE : MVNETA_RX_BUF_SIZE(pp->pkt_size)); mvneta_rxq_bm_disable(pp, rxq); mvneta_rxq_fill(pp, rxq, rxq->size); } else { /* Set Offset */ mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD - pp->rx_offset_correction); mvneta_rxq_bm_enable(pp, rxq); /* Fill RXQ with buffers from RX pool */ mvneta_rxq_long_pool_set(pp, rxq); mvneta_rxq_short_pool_set(pp, rxq); mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size); } } /* Create a specified RX queue */ static int mvneta_rxq_init(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) { int ret; ret = mvneta_rxq_sw_init(pp, rxq); if (ret < 0) return ret; mvneta_rxq_hw_init(pp, rxq); return 0; } /* Cleanup Rx queue */ static void mvneta_rxq_deinit(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) { mvneta_rxq_drop_pkts(pp, rxq); if (rxq->descs) dma_free_coherent(pp->dev->dev.parent, rxq->size * MVNETA_DESC_ALIGNED_SIZE, rxq->descs, rxq->descs_phys); rxq->descs = NULL; rxq->last_desc = 0; rxq->next_desc_to_proc = 0; rxq->descs_phys = 0; rxq->first_to_refill = 0; rxq->refill_num = 0; } static int mvneta_txq_sw_init(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { int cpu, err; txq->size = pp->tx_ring_size; /* A queue must always have room for at least one skb. * Therefore, stop the queue when the free entries reaches * the maximum number of descriptors per skb. */ txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS; txq->tx_wake_threshold = txq->tx_stop_threshold / 2; /* Allocate memory for TX descriptors */ txq->descs = dma_alloc_coherent(pp->dev->dev.parent, txq->size * MVNETA_DESC_ALIGNED_SIZE, &txq->descs_phys, GFP_KERNEL); if (!txq->descs) return -ENOMEM; txq->last_desc = txq->size - 1; txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL); if (!txq->buf) return -ENOMEM; /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ err = mvneta_alloc_tso_hdrs(pp, txq); if (err) return err; /* Setup XPS mapping */ if (pp->neta_armada3700) cpu = 0; else if (txq_number > 1) cpu = txq->id % num_present_cpus(); else cpu = pp->rxq_def % num_present_cpus(); cpumask_set_cpu(cpu, &txq->affinity_mask); netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id); return 0; } static void mvneta_txq_hw_init(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { /* Set maximum bandwidth for enabled TXQs */ mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); /* Set Tx descriptors queue starting address */ mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); } /* Create and initialize a tx queue */ static int mvneta_txq_init(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { int ret; ret = mvneta_txq_sw_init(pp, txq); if (ret < 0) return ret; mvneta_txq_hw_init(pp, txq); return 0; } /* Free allocated resources when mvneta_txq_init() fails to allocate memory*/ static void mvneta_txq_sw_deinit(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); kfree(txq->buf); mvneta_free_tso_hdrs(pp, txq); if (txq->descs) dma_free_coherent(pp->dev->dev.parent, txq->size * MVNETA_DESC_ALIGNED_SIZE, txq->descs, txq->descs_phys); netdev_tx_reset_queue(nq); txq->buf = NULL; txq->descs = NULL; txq->last_desc = 0; txq->next_desc_to_proc = 0; txq->descs_phys = 0; } static void mvneta_txq_hw_deinit(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { /* Set minimum bandwidth for 
disabled TXQs */ mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); /* Set Tx descriptors queue starting address and size */ mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); } static void mvneta_txq_deinit(struct mvneta_port *pp, struct mvneta_tx_queue *txq) { mvneta_txq_sw_deinit(pp, txq); mvneta_txq_hw_deinit(pp, txq); } /* Cleanup all Tx queues */ static void mvneta_cleanup_txqs(struct mvneta_port *pp) { int queue; for (queue = 0; queue < txq_number; queue++) mvneta_txq_deinit(pp, &pp->txqs[queue]); } /* Cleanup all Rx queues */ static void mvneta_cleanup_rxqs(struct mvneta_port *pp) { int queue; for (queue = 0; queue < rxq_number; queue++) mvneta_rxq_deinit(pp, &pp->rxqs[queue]); } /* Init all Rx queues */ static int mvneta_setup_rxqs(struct mvneta_port *pp) { int queue; for (queue = 0; queue < rxq_number; queue++) { int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); if (err) { netdev_err(pp->dev, "%s: can't create rxq=%d\n", __func__, queue); mvneta_cleanup_rxqs(pp); return err; } } return 0; } /* Init all tx queues */ static int mvneta_setup_txqs(struct mvneta_port *pp) { int queue; for (queue = 0; queue < txq_number; queue++) { int err = mvneta_txq_init(pp, &pp->txqs[queue]); if (err) { netdev_err(pp->dev, "%s: can't create txq=%d\n", __func__, queue); mvneta_cleanup_txqs(pp); return err; } } return 0; } static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface) { int ret; ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface); if (ret) return ret; return phy_power_on(pp->comphy); } static int mvneta_config_interface(struct mvneta_port *pp, phy_interface_t interface) { int ret = 0; if (pp->comphy) { if (interface == PHY_INTERFACE_MODE_SGMII || interface == PHY_INTERFACE_MODE_1000BASEX || interface == PHY_INTERFACE_MODE_2500BASEX) { ret = mvneta_comphy_init(pp, interface); } } else { switch (interface) { case PHY_INTERFACE_MODE_QSGMII: mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO); break; case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_1000BASEX: mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); break; case PHY_INTERFACE_MODE_2500BASEX: mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_HSGMII_SERDES_PROTO); break; default: break; } } pp->phy_interface = interface; return ret; } static void mvneta_start_dev(struct mvneta_port *pp) { int cpu; WARN_ON(mvneta_config_interface(pp, pp->phy_interface)); mvneta_max_rx_size_set(pp, pp->pkt_size); mvneta_txq_max_tx_size_set(pp, pp->pkt_size); /* start the Rx/Tx activity */ mvneta_port_enable(pp); if (!pp->neta_armada3700) { /* Enable polling on the port */ for_each_online_cpu(cpu) { struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); napi_enable(&port->napi); } } else { napi_enable(&pp->napi); } /* Unmask interrupts. 
It has to be done from each CPU */ on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); mvreg_write(pp, MVNETA_INTR_MISC_MASK, MVNETA_CAUSE_PHY_STATUS_CHANGE | MVNETA_CAUSE_LINK_CHANGE); phylink_start(pp->phylink); /* We may have called phylink_speed_down before */ phylink_speed_up(pp->phylink); netif_tx_start_all_queues(pp->dev); clear_bit(__MVNETA_DOWN, &pp->state); } static void mvneta_stop_dev(struct mvneta_port *pp) { unsigned int cpu; set_bit(__MVNETA_DOWN, &pp->state); if (device_may_wakeup(&pp->dev->dev)) phylink_speed_down(pp->phylink, false); phylink_stop(pp->phylink); if (!pp->neta_armada3700) { for_each_online_cpu(cpu) { struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); napi_disable(&port->napi); } } else { napi_disable(&pp->napi); } netif_carrier_off(pp->dev); mvneta_port_down(pp); netif_tx_stop_all_queues(pp->dev); /* Stop the port activity */ mvneta_port_disable(pp); /* Clear all ethernet port interrupts */ on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); /* Mask all ethernet port interrupts */ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); mvneta_tx_reset(pp); mvneta_rx_reset(pp); WARN_ON(phy_power_off(pp->comphy)); } static void mvneta_percpu_enable(void *arg) { struct mvneta_port *pp = arg; enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE); } static void mvneta_percpu_disable(void *arg) { struct mvneta_port *pp = arg; disable_percpu_irq(pp->dev->irq); } /* Change the device mtu */ static int mvneta_change_mtu(struct net_device *dev, int mtu) { struct mvneta_port *pp = netdev_priv(dev); struct bpf_prog *prog = pp->xdp_prog; int ret; if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) { netdev_info(dev, "Illegal MTU value %d, rounding to %d\n", mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8)); mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8); } if (prog && !prog->aux->xdp_has_frags && mtu > MVNETA_MAX_RX_BUF_SIZE) { netdev_info(dev, "Illegal MTU %d for XDP prog without frags\n", mtu); return -EINVAL; } dev->mtu = mtu; if (!netif_running(dev)) { if (pp->bm_priv) mvneta_bm_update_mtu(pp, mtu); netdev_update_features(dev); return 0; } /* The interface is running, so we have to force a * reallocation of the queues */ mvneta_stop_dev(pp); on_each_cpu(mvneta_percpu_disable, pp, true); mvneta_cleanup_txqs(pp); mvneta_cleanup_rxqs(pp); if (pp->bm_priv) mvneta_bm_update_mtu(pp, mtu); pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); ret = mvneta_setup_rxqs(pp); if (ret) { netdev_err(dev, "unable to setup rxqs after MTU change\n"); return ret; } ret = mvneta_setup_txqs(pp); if (ret) { netdev_err(dev, "unable to setup txqs after MTU change\n"); return ret; } on_each_cpu(mvneta_percpu_enable, pp, true); mvneta_start_dev(pp); netdev_update_features(dev); return 0; } static netdev_features_t mvneta_fix_features(struct net_device *dev, netdev_features_t features) { struct mvneta_port *pp = netdev_priv(dev); if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) { features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO); netdev_info(dev, "Disable IP checksum for MTU greater than %dB\n", pp->tx_csum_limit); } return features; } /* Get mac address */ static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr) { u32 mac_addr_l, mac_addr_h; mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW); mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH); addr[0] = (mac_addr_h >> 24) & 0xFF; addr[1] = (mac_addr_h >> 16) & 0xFF; addr[2] = (mac_addr_h >> 8) & 0xFF; addr[3] = mac_addr_h & 0xFF; addr[4] = (mac_addr_l >> 8) & 0xFF; addr[5] = mac_addr_l & 0xFF; } /* Handle setting mac address */ static int 
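/*
 * Editorial sketch, not part of the driver: the register layout implied by
 * mvneta_get_mac_addr() just above -- the first four MAC bytes live in the
 * "high" register (byte 0 in the top bits) and the last two in the "low"
 * register. The helper below is a hypothetical inverse used only to
 * illustrate the packing; it is not the driver's mvneta_mac_addr_set().
 */
#if 0
static void mac_addr_to_regs(const unsigned char *addr,
                             unsigned int *mac_h, unsigned int *mac_l)
{
        *mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
        *mac_l = (addr[4] << 8) | addr[5];
}
#endif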
mvneta_set_mac_addr(struct net_device *dev, void *addr) { struct mvneta_port *pp = netdev_priv(dev); struct sockaddr *sockaddr = addr; int ret; ret = eth_prepare_mac_addr_change(dev, addr); if (ret < 0) return ret; /* Remove previous address table entry */ mvneta_mac_addr_set(pp, dev->dev_addr, -1); /* Set new addr in hw */ mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def); eth_commit_mac_addr_change(dev, addr); return 0; } static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs) { return container_of(pcs, struct mvneta_port, phylink_pcs); } static int mvneta_pcs_validate(struct phylink_pcs *pcs, unsigned long *supported, const struct phylink_link_state *state) { /* We only support QSGMII, SGMII, 802.3z and RGMII modes. * When in 802.3z mode, we must have AN enabled: * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... * When <PortType> = 1 (1000BASE-X) this field must be set to 1." */ if (phy_interface_mode_is_8023z(state->interface) && !phylink_test(state->advertising, Autoneg)) return -EINVAL; return 0; } static void mvneta_pcs_get_state(struct phylink_pcs *pcs, struct phylink_link_state *state) { struct mvneta_port *pp = mvneta_pcs_to_port(pcs); u32 gmac_stat; gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); if (gmac_stat & MVNETA_GMAC_SPEED_1000) state->speed = state->interface == PHY_INTERFACE_MODE_2500BASEX ? SPEED_2500 : SPEED_1000; else if (gmac_stat & MVNETA_GMAC_SPEED_100) state->speed = SPEED_100; else state->speed = SPEED_10; state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE); state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP); state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX); if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE) state->pause |= MLO_PAUSE_RX; if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE) state->pause |= MLO_PAUSE_TX; } static int mvneta_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, const unsigned long *advertising, bool permit_pause_to_mac) { struct mvneta_port *pp = mvneta_pcs_to_port(pcs); u32 mask, val, an, old_an, changed; mask = MVNETA_GMAC_INBAND_AN_ENABLE | MVNETA_GMAC_INBAND_RESTART_AN | MVNETA_GMAC_AN_SPEED_EN | MVNETA_GMAC_AN_FLOW_CTRL_EN | MVNETA_GMAC_AN_DUPLEX_EN; if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) { mask |= MVNETA_GMAC_CONFIG_MII_SPEED | MVNETA_GMAC_CONFIG_GMII_SPEED | MVNETA_GMAC_CONFIG_FULL_DUPLEX; val = MVNETA_GMAC_INBAND_AN_ENABLE; if (interface == PHY_INTERFACE_MODE_SGMII) { /* SGMII mode receives the speed and duplex from PHY */ val |= MVNETA_GMAC_AN_SPEED_EN | MVNETA_GMAC_AN_DUPLEX_EN; } else { /* 802.3z mode has fixed speed and duplex */ val |= MVNETA_GMAC_CONFIG_GMII_SPEED | MVNETA_GMAC_CONFIG_FULL_DUPLEX; /* The FLOW_CTRL_EN bit selects either the hardware * automatically or the CONFIG_FLOW_CTRL manually * controls the GMAC pause mode. 
*/ if (permit_pause_to_mac) val |= MVNETA_GMAC_AN_FLOW_CTRL_EN; /* Update the advertisement bits */ mask |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL; if (phylink_test(advertising, Pause)) val |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL; } } else { /* Phy or fixed speed - disable in-band AN modes */ val = 0; } old_an = an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); an = (an & ~mask) | val; changed = old_an ^ an; if (changed) mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, an); /* We are only interested in the advertisement bits changing */ return !!(changed & MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL); } static void mvneta_pcs_an_restart(struct phylink_pcs *pcs) { struct mvneta_port *pp = mvneta_pcs_to_port(pcs); u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, gmac_an | MVNETA_GMAC_INBAND_RESTART_AN); mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN); } static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = { .pcs_validate = mvneta_pcs_validate, .pcs_get_state = mvneta_pcs_get_state, .pcs_config = mvneta_pcs_config, .pcs_an_restart = mvneta_pcs_an_restart, }; static struct phylink_pcs *mvneta_mac_select_pcs(struct phylink_config *config, phy_interface_t interface) { struct net_device *ndev = to_net_dev(config->dev); struct mvneta_port *pp = netdev_priv(ndev); return &pp->phylink_pcs; } static int mvneta_mac_prepare(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { struct net_device *ndev = to_net_dev(config->dev); struct mvneta_port *pp = netdev_priv(ndev); u32 val; if (pp->phy_interface != interface || phylink_autoneg_inband(mode)) { /* Force the link down when changing the interface or if in * in-band mode. According to Armada 370 documentation, we * can only change the port mode and in-band enable when the * link is down. */ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); val &= ~MVNETA_GMAC_FORCE_LINK_PASS; val |= MVNETA_GMAC_FORCE_LINK_DOWN; mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); } if (pp->phy_interface != interface) WARN_ON(phy_power_off(pp->comphy)); /* Enable the 1ms clock */ if (phylink_autoneg_inband(mode)) { unsigned long rate = clk_get_rate(pp->clk); mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, MVNETA_GMAC_1MS_CLOCK_ENABLE | (rate / 1000)); } return 0; } static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { struct net_device *ndev = to_net_dev(config->dev); struct mvneta_port *pp = netdev_priv(ndev); u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0); u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2); u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4); new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X; new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE | MVNETA_GMAC2_PORT_RESET); new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE); /* Even though it might look weird, when we're configured in * SGMII or QSGMII mode, the RGMII bit needs to be set. */ new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII; if (state->interface == PHY_INTERFACE_MODE_QSGMII || state->interface == PHY_INTERFACE_MODE_SGMII || phy_interface_mode_is_8023z(state->interface)) new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE; if (!phylink_autoneg_inband(mode)) { /* Phy or fixed speed - nothing to do, leave the * configured speed, duplex and flow control as-is. 
*/ } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { /* SGMII mode receives the state from the PHY */ new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE; } else { /* 802.3z negotiation - only 1000base-X */ new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X; } /* When at 2.5G, the link partner can send frames with shortened * preambles. */ if (state->interface == PHY_INTERFACE_MODE_2500BASEX) new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE; if (new_ctrl0 != gmac_ctrl0) mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0); if (new_ctrl2 != gmac_ctrl2) mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2); if (new_ctrl4 != gmac_ctrl4) mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4); if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) { while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & MVNETA_GMAC2_PORT_RESET) != 0) continue; } } static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { struct net_device *ndev = to_net_dev(config->dev); struct mvneta_port *pp = netdev_priv(ndev); u32 val, clk; /* Disable 1ms clock if not in in-band mode */ if (!phylink_autoneg_inband(mode)) { clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); clk &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE; mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, clk); } if (pp->phy_interface != interface) /* Enable the Serdes PHY */ WARN_ON(mvneta_config_interface(pp, interface)); /* Allow the link to come up if in in-band mode, otherwise the * link is forced via mac_link_down()/mac_link_up() */ if (phylink_autoneg_inband(mode)) { val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); val &= ~MVNETA_GMAC_FORCE_LINK_DOWN; mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); } return 0; } static void mvneta_set_eee(struct mvneta_port *pp, bool enable) { u32 lpi_ctl1; lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1); if (enable) lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE; else lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE; mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1); } static void mvneta_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { struct net_device *ndev = to_net_dev(config->dev); struct mvneta_port *pp = netdev_priv(ndev); u32 val; mvneta_port_down(pp); if (!phylink_autoneg_inband(mode)) { val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); val &= ~MVNETA_GMAC_FORCE_LINK_PASS; val |= MVNETA_GMAC_FORCE_LINK_DOWN; mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); } pp->eee_active = false; mvneta_set_eee(pp, false); } static void mvneta_mac_link_up(struct phylink_config *config, struct phy_device *phy, unsigned int mode, phy_interface_t interface, int speed, int duplex, bool tx_pause, bool rx_pause) { struct net_device *ndev = to_net_dev(config->dev); struct mvneta_port *pp = netdev_priv(ndev); u32 val; if (!phylink_autoneg_inband(mode)) { val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); val &= ~(MVNETA_GMAC_FORCE_LINK_DOWN | MVNETA_GMAC_CONFIG_MII_SPEED | MVNETA_GMAC_CONFIG_GMII_SPEED | MVNETA_GMAC_CONFIG_FLOW_CTRL | MVNETA_GMAC_CONFIG_FULL_DUPLEX); val |= MVNETA_GMAC_FORCE_LINK_PASS; if (speed == SPEED_1000 || speed == SPEED_2500) val |= MVNETA_GMAC_CONFIG_GMII_SPEED; else if (speed == SPEED_100) val |= MVNETA_GMAC_CONFIG_MII_SPEED; if (duplex == DUPLEX_FULL) val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; if (tx_pause || rx_pause) val |= MVNETA_GMAC_CONFIG_FLOW_CTRL; mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); } else { /* When inband doesn't cover flow control or flow control is * disabled, we need to manually configure it. This bit will * only have effect if MVNETA_GMAC_AN_FLOW_CTRL_EN is unset. 
*/ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); val &= ~MVNETA_GMAC_CONFIG_FLOW_CTRL; if (tx_pause || rx_pause) val |= MVNETA_GMAC_CONFIG_FLOW_CTRL; mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); } mvneta_port_up(pp); if (phy && pp->eee_enabled) { pp->eee_active = phy_init_eee(phy, false) >= 0; mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled); } } static const struct phylink_mac_ops mvneta_phylink_ops = { .mac_select_pcs = mvneta_mac_select_pcs, .mac_prepare = mvneta_mac_prepare, .mac_config = mvneta_mac_config, .mac_finish = mvneta_mac_finish, .mac_link_down = mvneta_mac_link_down, .mac_link_up = mvneta_mac_link_up, }; static int mvneta_mdio_probe(struct mvneta_port *pp) { struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0); if (err) netdev_err(pp->dev, "could not attach PHY: %d\n", err); phylink_ethtool_get_wol(pp->phylink, &wol); device_set_wakeup_capable(&pp->dev->dev, !!wol.supported); /* PHY WoL may be enabled but device wakeup disabled */ if (wol.supported) device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts); return err; } static void mvneta_mdio_remove(struct mvneta_port *pp) { phylink_disconnect_phy(pp->phylink); } /* Electing a CPU must be done in an atomic way: it should be done * after or before the removal/insertion of a CPU and this function is * not reentrant. */ static void mvneta_percpu_elect(struct mvneta_port *pp) { int elected_cpu = 0, max_cpu, cpu; /* Use the cpu associated to the rxq when it is online, in all * the other cases, use the cpu 0 which can't be offline. */ if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def)) elected_cpu = pp->rxq_def; max_cpu = num_present_cpus(); for_each_online_cpu(cpu) { int rxq_map = 0, txq_map = 0; int rxq; for (rxq = 0; rxq < rxq_number; rxq++) if ((rxq % max_cpu) == cpu) rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); if (cpu == elected_cpu) /* Map the default receive queue to the elected CPU */ rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def); /* We update the TX queue map only if we have one * queue. In this case we associate the TX queue to * the CPU bound to the default RX queue */ if (txq_number == 1) txq_map = (cpu == elected_cpu) ? MVNETA_CPU_TXQ_ACCESS(0) : 0; else txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & MVNETA_CPU_TXQ_ACCESS_ALL_MASK; mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); /* Update the interrupt mask on each CPU according the * new mapping */ smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, pp, true); } }; static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) { int other_cpu; struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, node_online); struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); /* Armada 3700's per-cpu interrupt for mvneta is broken, all interrupts * are routed to CPU 0, so we don't need all the cpu-hotplug support */ if (pp->neta_armada3700) return 0; spin_lock(&pp->lock); /* * Configuring the driver for a new CPU while the driver is * stopping is racy, so just avoid it. 
 */
        if (pp->is_stopped) {
                spin_unlock(&pp->lock);
                return 0;
        }

        netif_tx_stop_all_queues(pp->dev);

        /*
         * We have to synchronise on the napi of each CPU except the one
         * just being woken up
         */
        for_each_online_cpu(other_cpu) {
                if (other_cpu != cpu) {
                        struct mvneta_pcpu_port *other_port =
                                per_cpu_ptr(pp->ports, other_cpu);

                        napi_synchronize(&other_port->napi);
                }
        }

        /* Mask all ethernet port interrupts */
        on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
        napi_enable(&port->napi);

        /*
         * Enable per-CPU interrupts on the CPU that is
         * brought up.
         */
        mvneta_percpu_enable(pp);

        /*
         * Enable per-CPU interrupt on the one CPU we care
         * about.
         */
        mvneta_percpu_elect(pp);

        /* Unmask all ethernet port interrupts */
        on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
        mvreg_write(pp, MVNETA_INTR_MISC_MASK,
                    MVNETA_CAUSE_PHY_STATUS_CHANGE |
                    MVNETA_CAUSE_LINK_CHANGE);
        netif_tx_start_all_queues(pp->dev);
        spin_unlock(&pp->lock);
        return 0;
}

static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
{
        struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
                                                  node_online);
        struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

        /*
         * Thanks to this lock we are sure that any pending cpu election is
         * done.
         */
        spin_lock(&pp->lock);
        /* Mask all ethernet port interrupts */
        on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
        spin_unlock(&pp->lock);

        napi_synchronize(&port->napi);
        napi_disable(&port->napi);
        /* Disable per-CPU interrupts on the CPU that is brought down. */
        mvneta_percpu_disable(pp);
        return 0;
}

static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
        struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
                                                  node_dead);

        /* Check if a new CPU must be elected now that this one is down */
        spin_lock(&pp->lock);
        mvneta_percpu_elect(pp);
        spin_unlock(&pp->lock);
        /* Unmask all ethernet port interrupts */
        on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
        mvreg_write(pp, MVNETA_INTR_MISC_MASK,
                    MVNETA_CAUSE_PHY_STATUS_CHANGE |
                    MVNETA_CAUSE_LINK_CHANGE);
        netif_tx_start_all_queues(pp->dev);
        return 0;
}

static int mvneta_open(struct net_device *dev)
{
        struct mvneta_port *pp = netdev_priv(dev);
        int ret;

        pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);

        ret = mvneta_setup_rxqs(pp);
        if (ret)
                return ret;

        ret = mvneta_setup_txqs(pp);
        if (ret)
                goto err_cleanup_rxqs;

        /* Connect to port interrupt line */
        if (pp->neta_armada3700)
                ret = request_irq(pp->dev->irq, mvneta_isr, 0,
                                  dev->name, pp);
        else
                ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
                                         dev->name, pp->ports);
        if (ret) {
                netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
                goto err_cleanup_txqs;
        }

        if (!pp->neta_armada3700) {
                /* Enable per-CPU interrupt on all the CPUs to handle our RX
                 * queue interrupts
                 */
                on_each_cpu(mvneta_percpu_enable, pp, true);

                pp->is_stopped = false;
                /* Register a CPU notifier to handle the case where our CPU
                 * might be taken offline.
*/ ret = cpuhp_state_add_instance_nocalls(online_hpstate, &pp->node_online); if (ret) goto err_free_irq; ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD, &pp->node_dead); if (ret) goto err_free_online_hp; } ret = mvneta_mdio_probe(pp); if (ret < 0) { netdev_err(dev, "cannot probe MDIO bus\n"); goto err_free_dead_hp; } mvneta_start_dev(pp); return 0; err_free_dead_hp: if (!pp->neta_armada3700) cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, &pp->node_dead); err_free_online_hp: if (!pp->neta_armada3700) cpuhp_state_remove_instance_nocalls(online_hpstate, &pp->node_online); err_free_irq: if (pp->neta_armada3700) { free_irq(pp->dev->irq, pp); } else { on_each_cpu(mvneta_percpu_disable, pp, true); free_percpu_irq(pp->dev->irq, pp->ports); } err_cleanup_txqs: mvneta_cleanup_txqs(pp); err_cleanup_rxqs: mvneta_cleanup_rxqs(pp); return ret; } /* Stop the port, free port interrupt line */ static int mvneta_stop(struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); if (!pp->neta_armada3700) { /* Inform that we are stopping so we don't want to setup the * driver for new CPUs in the notifiers. The code of the * notifier for CPU online is protected by the same spinlock, * so when we get the lock, the notifer work is done. */ spin_lock(&pp->lock); pp->is_stopped = true; spin_unlock(&pp->lock); mvneta_stop_dev(pp); mvneta_mdio_remove(pp); cpuhp_state_remove_instance_nocalls(online_hpstate, &pp->node_online); cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, &pp->node_dead); on_each_cpu(mvneta_percpu_disable, pp, true); free_percpu_irq(dev->irq, pp->ports); } else { mvneta_stop_dev(pp); mvneta_mdio_remove(pp); free_irq(dev->irq, pp); } mvneta_cleanup_rxqs(pp); mvneta_cleanup_txqs(pp); return 0; } static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mvneta_port *pp = netdev_priv(dev); return phylink_mii_ioctl(pp->phylink, ifr, cmd); } static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog, struct netlink_ext_ack *extack) { bool need_update, running = netif_running(dev); struct mvneta_port *pp = netdev_priv(dev); struct bpf_prog *old_prog; if (prog && !prog->aux->xdp_has_frags && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) { NL_SET_ERR_MSG_MOD(extack, "prog does not support XDP frags"); return -EOPNOTSUPP; } if (pp->bm_priv) { NL_SET_ERR_MSG_MOD(extack, "Hardware Buffer Management not supported on XDP"); return -EOPNOTSUPP; } need_update = !!pp->xdp_prog != !!prog; if (running && need_update) mvneta_stop(dev); old_prog = xchg(&pp->xdp_prog, prog); if (old_prog) bpf_prog_put(old_prog); if (running && need_update) return mvneta_open(dev); return 0; } static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return mvneta_xdp_setup(dev, xdp->prog, xdp->extack); default: return -EINVAL; } } /* Ethtool methods */ /* Set link ksettings (phy address, speed) for ethtools */ static int mvneta_ethtool_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *cmd) { struct mvneta_port *pp = netdev_priv(ndev); return phylink_ethtool_ksettings_set(pp->phylink, cmd); } /* Get link ksettings for ethtools */ static int mvneta_ethtool_get_link_ksettings(struct net_device *ndev, struct ethtool_link_ksettings *cmd) { struct mvneta_port *pp = netdev_priv(ndev); return phylink_ethtool_ksettings_get(pp->phylink, cmd); } static int mvneta_ethtool_nway_reset(struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); return 
phylink_ethtool_nway_reset(pp->phylink); } /* Set interrupt coalescing for ethtools */ static int mvneta_ethtool_set_coalesce(struct net_device *dev, struct ethtool_coalesce *c, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct mvneta_port *pp = netdev_priv(dev); int queue; for (queue = 0; queue < rxq_number; queue++) { struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; rxq->time_coal = c->rx_coalesce_usecs; rxq->pkts_coal = c->rx_max_coalesced_frames; mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); } for (queue = 0; queue < txq_number; queue++) { struct mvneta_tx_queue *txq = &pp->txqs[queue]; txq->done_pkts_coal = c->tx_max_coalesced_frames; mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); } return 0; } /* get coalescing for ethtools */ static int mvneta_ethtool_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct mvneta_port *pp = netdev_priv(dev); c->rx_coalesce_usecs = pp->rxqs[0].time_coal; c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; return 0; } static void mvneta_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { strscpy(drvinfo->driver, MVNETA_DRIVER_NAME, sizeof(drvinfo->driver)); strscpy(drvinfo->version, MVNETA_DRIVER_VERSION, sizeof(drvinfo->version)); strscpy(drvinfo->bus_info, dev_name(&dev->dev), sizeof(drvinfo->bus_info)); } static void mvneta_ethtool_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct mvneta_port *pp = netdev_priv(netdev); ring->rx_max_pending = MVNETA_MAX_RXD; ring->tx_max_pending = MVNETA_MAX_TXD; ring->rx_pending = pp->rx_ring_size; ring->tx_pending = pp->tx_ring_size; } static int mvneta_ethtool_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct mvneta_port *pp = netdev_priv(dev); if ((ring->rx_pending == 0) || (ring->tx_pending == 0)) return -EINVAL; pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? 
ring->rx_pending : MVNETA_MAX_RXD; pp->tx_ring_size = clamp_t(u16, ring->tx_pending, MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD); if (pp->tx_ring_size != ring->tx_pending) netdev_warn(dev, "TX queue size set to %u (requested %u)\n", pp->tx_ring_size, ring->tx_pending); if (netif_running(dev)) { mvneta_stop(dev); if (mvneta_open(dev)) { netdev_err(dev, "error on opening device after ring param change\n"); return -ENOMEM; } } return 0; } static void mvneta_ethtool_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause) { struct mvneta_port *pp = netdev_priv(dev); phylink_ethtool_get_pauseparam(pp->phylink, pause); } static int mvneta_ethtool_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause) { struct mvneta_port *pp = netdev_priv(dev); return phylink_ethtool_set_pauseparam(pp->phylink, pause); } static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset, u8 *data) { if (sset == ETH_SS_STATS) { int i; for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) memcpy(data + i * ETH_GSTRING_LEN, mvneta_statistics[i].name, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics); page_pool_ethtool_stats_get_strings(data); } } static void mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp, struct mvneta_ethtool_stats *es) { unsigned int start; int cpu; for_each_possible_cpu(cpu) { struct mvneta_pcpu_stats *stats; u64 skb_alloc_error; u64 refill_error; u64 xdp_redirect; u64 xdp_xmit_err; u64 xdp_tx_err; u64 xdp_pass; u64 xdp_drop; u64 xdp_xmit; u64 xdp_tx; stats = per_cpu_ptr(pp->stats, cpu); do { start = u64_stats_fetch_begin(&stats->syncp); skb_alloc_error = stats->es.skb_alloc_error; refill_error = stats->es.refill_error; xdp_redirect = stats->es.ps.xdp_redirect; xdp_pass = stats->es.ps.xdp_pass; xdp_drop = stats->es.ps.xdp_drop; xdp_xmit = stats->es.ps.xdp_xmit; xdp_xmit_err = stats->es.ps.xdp_xmit_err; xdp_tx = stats->es.ps.xdp_tx; xdp_tx_err = stats->es.ps.xdp_tx_err; } while (u64_stats_fetch_retry(&stats->syncp, start)); es->skb_alloc_error += skb_alloc_error; es->refill_error += refill_error; es->ps.xdp_redirect += xdp_redirect; es->ps.xdp_pass += xdp_pass; es->ps.xdp_drop += xdp_drop; es->ps.xdp_xmit += xdp_xmit; es->ps.xdp_xmit_err += xdp_xmit_err; es->ps.xdp_tx += xdp_tx; es->ps.xdp_tx_err += xdp_tx_err; } } static void mvneta_ethtool_update_stats(struct mvneta_port *pp) { struct mvneta_ethtool_stats stats = {}; const struct mvneta_statistic *s; void __iomem *base = pp->base; u32 high, low; u64 val; int i; mvneta_ethtool_update_pcpu_stats(pp, &stats); for (i = 0, s = mvneta_statistics; s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics); s++, i++) { switch (s->type) { case T_REG_32: val = readl_relaxed(base + s->offset); pp->ethtool_stats[i] += val; break; case T_REG_64: /* Docs say to read low 32-bit then high */ low = readl_relaxed(base + s->offset); high = readl_relaxed(base + s->offset + 4); val = (u64)high << 32 | low; pp->ethtool_stats[i] += val; break; case T_SW: switch (s->offset) { case ETHTOOL_STAT_EEE_WAKEUP: val = phylink_get_eee_err(pp->phylink); pp->ethtool_stats[i] += val; break; case ETHTOOL_STAT_SKB_ALLOC_ERR: pp->ethtool_stats[i] = stats.skb_alloc_error; break; case ETHTOOL_STAT_REFILL_ERR: pp->ethtool_stats[i] = stats.refill_error; break; case ETHTOOL_XDP_REDIRECT: pp->ethtool_stats[i] = stats.ps.xdp_redirect; break; case ETHTOOL_XDP_PASS: pp->ethtool_stats[i] = stats.ps.xdp_pass; break; case ETHTOOL_XDP_DROP: pp->ethtool_stats[i] = stats.ps.xdp_drop; break; case ETHTOOL_XDP_TX: pp->ethtool_stats[i] 
= stats.ps.xdp_tx; break; case ETHTOOL_XDP_TX_ERR: pp->ethtool_stats[i] = stats.ps.xdp_tx_err; break; case ETHTOOL_XDP_XMIT: pp->ethtool_stats[i] = stats.ps.xdp_xmit; break; case ETHTOOL_XDP_XMIT_ERR: pp->ethtool_stats[i] = stats.ps.xdp_xmit_err; break; } break; } } } static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data) { struct page_pool_stats stats = {}; int i; for (i = 0; i < rxq_number; i++) page_pool_get_stats(pp->rxqs[i].page_pool, &stats); page_pool_ethtool_stats_get(data, &stats); } static void mvneta_ethtool_get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct mvneta_port *pp = netdev_priv(dev); int i; mvneta_ethtool_update_stats(pp); for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) *data++ = pp->ethtool_stats[i]; mvneta_ethtool_pp_stats(pp, data); } static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset) { if (sset == ETH_SS_STATS) return ARRAY_SIZE(mvneta_statistics) + page_pool_ethtool_stats_get_count(); return -EOPNOTSUPP; } static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev) { return MVNETA_RSS_LU_TABLE_SIZE; } static int mvneta_ethtool_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rules __always_unused) { switch (info->cmd) { case ETHTOOL_GRXRINGS: info->data = rxq_number; return 0; case ETHTOOL_GRXFH: return -EOPNOTSUPP; default: return -EOPNOTSUPP; } } static int mvneta_config_rss(struct mvneta_port *pp) { int cpu; u32 val; netif_tx_stop_all_queues(pp->dev); on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); if (!pp->neta_armada3700) { /* We have to synchronise on the napi of each CPU */ for_each_online_cpu(cpu) { struct mvneta_pcpu_port *pcpu_port = per_cpu_ptr(pp->ports, cpu); napi_synchronize(&pcpu_port->napi); napi_disable(&pcpu_port->napi); } } else { napi_synchronize(&pp->napi); napi_disable(&pp->napi); } pp->rxq_def = pp->indir[0]; /* Update unicast mapping */ mvneta_set_rx_mode(pp->dev); /* Update val of portCfg register accordingly with all RxQueue types */ val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); mvreg_write(pp, MVNETA_PORT_CONFIG, val); /* Update the elected CPU matching the new rxq_def */ spin_lock(&pp->lock); mvneta_percpu_elect(pp); spin_unlock(&pp->lock); if (!pp->neta_armada3700) { /* We have to synchronise on the napi of each CPU */ for_each_online_cpu(cpu) { struct mvneta_pcpu_port *pcpu_port = per_cpu_ptr(pp->ports, cpu); napi_enable(&pcpu_port->napi); } } else { napi_enable(&pp->napi); } netif_tx_start_all_queues(pp->dev); return 0; } static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) { struct mvneta_port *pp = netdev_priv(dev); /* Current code for Armada 3700 doesn't support RSS features yet */ if (pp->neta_armada3700) return -EOPNOTSUPP; /* We require at least one supported parameter to be changed * and no change in any of the unsupported parameters */ if (key || (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) return -EOPNOTSUPP; if (!indir) return 0; memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE); return mvneta_config_rss(pp); } static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) { struct mvneta_port *pp = netdev_priv(dev); /* Current code for Armada 3700 doesn't support RSS features yet */ if (pp->neta_armada3700) return -EOPNOTSUPP; if (hfunc) *hfunc = ETH_RSS_HASH_TOP; if (!indir) return 0; memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE); return 0; } static void mvneta_ethtool_get_wol(struct net_device *dev, 
struct ethtool_wolinfo *wol) { struct mvneta_port *pp = netdev_priv(dev); phylink_ethtool_get_wol(pp->phylink, wol); } static int mvneta_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct mvneta_port *pp = netdev_priv(dev); int ret; ret = phylink_ethtool_set_wol(pp->phylink, wol); if (!ret) device_set_wakeup_enable(&dev->dev, !!wol->wolopts); return ret; } static int mvneta_ethtool_get_eee(struct net_device *dev, struct ethtool_eee *eee) { struct mvneta_port *pp = netdev_priv(dev); u32 lpi_ctl0; lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); eee->eee_enabled = pp->eee_enabled; eee->eee_active = pp->eee_active; eee->tx_lpi_enabled = pp->tx_lpi_enabled; eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale; return phylink_ethtool_get_eee(pp->phylink, eee); } static int mvneta_ethtool_set_eee(struct net_device *dev, struct ethtool_eee *eee) { struct mvneta_port *pp = netdev_priv(dev); u32 lpi_ctl0; /* The Armada 37x documents do not give limits for this other than * it being an 8-bit register. */ if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255) return -EINVAL; lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); lpi_ctl0 &= ~(0xff << 8); lpi_ctl0 |= eee->tx_lpi_timer << 8; mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0); pp->eee_enabled = eee->eee_enabled; pp->tx_lpi_enabled = eee->tx_lpi_enabled; mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled); return phylink_ethtool_set_eee(pp->phylink, eee); } static void mvneta_clear_rx_prio_map(struct mvneta_port *pp) { mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0); } static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq) { u32 val = mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ); val &= ~MVNETA_VLAN_PRIO_RXQ_MAP(pri, 0x7); val |= MVNETA_VLAN_PRIO_RXQ_MAP(pri, rxq); mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val); } static int mvneta_enable_per_queue_rate_limit(struct mvneta_port *pp) { unsigned long core_clk_rate; u32 refill_cycles; u32 val; core_clk_rate = clk_get_rate(pp->clk); if (!core_clk_rate) return -EINVAL; refill_cycles = MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS / (NSEC_PER_SEC / core_clk_rate); if (refill_cycles > MVNETA_REFILL_MAX_NUM_CLK) return -EINVAL; /* Enable bw limit algorithm version 3 */ val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG); val &= ~(MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN); mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val); /* Set the base refill rate */ mvreg_write(pp, MVNETA_REFILL_NUM_CLK_REG, refill_cycles); return 0; } static void mvneta_disable_per_queue_rate_limit(struct mvneta_port *pp) { u32 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG); val |= (MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN); mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val); } static int mvneta_setup_queue_rates(struct mvneta_port *pp, int queue, u64 min_rate, u64 max_rate) { u32 refill_val, rem; u32 val = 0; /* Convert to from Bps to bps */ max_rate *= 8; if (min_rate) return -EINVAL; refill_val = div_u64_rem(max_rate, MVNETA_TXQ_RATE_LIMIT_RESOLUTION, &rem); if (rem || !refill_val || refill_val > MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX) return -EINVAL; val = refill_val; val |= (MVNETA_TXQ_BUCKET_REFILL_PERIOD << MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT); mvreg_write(pp, MVNETA_TXQ_BUCKET_REFILL_REG(queue), val); return 0; } static int mvneta_setup_mqprio(struct net_device *dev, struct tc_mqprio_qopt_offload *mqprio) { struct mvneta_port *pp = netdev_priv(dev); int rxq, txq, tc, ret; u8 num_tc; if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) return 0; num_tc = mqprio->qopt.num_tc; if (num_tc > rxq_number) 
return -EINVAL; mvneta_clear_rx_prio_map(pp); if (!num_tc) { mvneta_disable_per_queue_rate_limit(pp); netdev_reset_tc(dev); return 0; } netdev_set_num_tc(dev, mqprio->qopt.num_tc); for (tc = 0; tc < mqprio->qopt.num_tc; tc++) { netdev_set_tc_queue(dev, tc, mqprio->qopt.count[tc], mqprio->qopt.offset[tc]); for (rxq = mqprio->qopt.offset[tc]; rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc]; rxq++) { if (rxq >= rxq_number) return -EINVAL; mvneta_map_vlan_prio_to_rxq(pp, tc, rxq); } } if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) { mvneta_disable_per_queue_rate_limit(pp); return 0; } if (mqprio->qopt.num_tc > txq_number) return -EINVAL; ret = mvneta_enable_per_queue_rate_limit(pp); if (ret) return ret; for (tc = 0; tc < mqprio->qopt.num_tc; tc++) { for (txq = mqprio->qopt.offset[tc]; txq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc]; txq++) { if (txq >= txq_number) return -EINVAL; ret = mvneta_setup_queue_rates(pp, txq, mqprio->min_rate[tc], mqprio->max_rate[tc]); if (ret) return ret; } } return 0; } static int mvneta_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { switch (type) { case TC_SETUP_QDISC_MQPRIO: return mvneta_setup_mqprio(dev, type_data); default: return -EOPNOTSUPP; } } static const struct net_device_ops mvneta_netdev_ops = { .ndo_open = mvneta_open, .ndo_stop = mvneta_stop, .ndo_start_xmit = mvneta_tx, .ndo_set_rx_mode = mvneta_set_rx_mode, .ndo_set_mac_address = mvneta_set_mac_addr, .ndo_change_mtu = mvneta_change_mtu, .ndo_fix_features = mvneta_fix_features, .ndo_get_stats64 = mvneta_get_stats64, .ndo_eth_ioctl = mvneta_ioctl, .ndo_bpf = mvneta_xdp, .ndo_xdp_xmit = mvneta_xdp_xmit, .ndo_setup_tc = mvneta_setup_tc, }; static const struct ethtool_ops mvneta_eth_tool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_MAX_FRAMES, .nway_reset = mvneta_ethtool_nway_reset, .get_link = ethtool_op_get_link, .set_coalesce = mvneta_ethtool_set_coalesce, .get_coalesce = mvneta_ethtool_get_coalesce, .get_drvinfo = mvneta_ethtool_get_drvinfo, .get_ringparam = mvneta_ethtool_get_ringparam, .set_ringparam = mvneta_ethtool_set_ringparam, .get_pauseparam = mvneta_ethtool_get_pauseparam, .set_pauseparam = mvneta_ethtool_set_pauseparam, .get_strings = mvneta_ethtool_get_strings, .get_ethtool_stats = mvneta_ethtool_get_stats, .get_sset_count = mvneta_ethtool_get_sset_count, .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size, .get_rxnfc = mvneta_ethtool_get_rxnfc, .get_rxfh = mvneta_ethtool_get_rxfh, .set_rxfh = mvneta_ethtool_set_rxfh, .get_link_ksettings = mvneta_ethtool_get_link_ksettings, .set_link_ksettings = mvneta_ethtool_set_link_ksettings, .get_wol = mvneta_ethtool_get_wol, .set_wol = mvneta_ethtool_set_wol, .get_eee = mvneta_ethtool_get_eee, .set_eee = mvneta_ethtool_set_eee, }; /* Initialize hw */ static int mvneta_init(struct device *dev, struct mvneta_port *pp) { int queue; /* Disable port */ mvneta_port_disable(pp); /* Set port default values */ mvneta_defaults_set(pp); pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL); if (!pp->txqs) return -ENOMEM; /* Initialize TX descriptor rings */ for (queue = 0; queue < txq_number; queue++) { struct mvneta_tx_queue *txq = &pp->txqs[queue]; txq->id = queue; txq->size = pp->tx_ring_size; txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS; } pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL); if (!pp->rxqs) return -ENOMEM; /* Create Rx descriptor rings */ for (queue = 0; queue < rxq_number; queue++) { struct mvneta_rx_queue *rxq = 
&pp->rxqs[queue]; rxq->id = queue; rxq->size = pp->rx_ring_size; rxq->pkts_coal = MVNETA_RX_COAL_PKTS; rxq->time_coal = MVNETA_RX_COAL_USEC; rxq->buf_virt_addr = devm_kmalloc_array(pp->dev->dev.parent, rxq->size, sizeof(*rxq->buf_virt_addr), GFP_KERNEL); if (!rxq->buf_virt_addr) return -ENOMEM; } return 0; } /* platform glue : initialize decoding windows */ static void mvneta_conf_mbus_windows(struct mvneta_port *pp, const struct mbus_dram_target_info *dram) { u32 win_enable; u32 win_protect; int i; for (i = 0; i < 6; i++) { mvreg_write(pp, MVNETA_WIN_BASE(i), 0); mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); if (i < 4) mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); } win_enable = 0x3f; win_protect = 0; if (dram) { for (i = 0; i < dram->num_cs; i++) { const struct mbus_dram_window *cs = dram->cs + i; mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) | (cs->mbus_attr << 8) | dram->mbus_dram_target_id); mvreg_write(pp, MVNETA_WIN_SIZE(i), (cs->size - 1) & 0xffff0000); win_enable &= ~(1 << i); win_protect |= 3 << (2 * i); } } else { if (pp->neta_ac5) mvreg_write(pp, MVNETA_WIN_BASE(0), (MVNETA_AC5_CNM_DDR_ATTR << 8) | MVNETA_AC5_CNM_DDR_TARGET); /* For Armada3700 open default 4GB Mbus window, leaving * arbitration of target/attribute to a different layer * of configuration. */ mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000); win_enable &= ~BIT(0); win_protect = 3; } mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); } /* Power up the port */ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) { /* MAC Cause register should be cleared */ mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); if (phy_mode != PHY_INTERFACE_MODE_QSGMII && phy_mode != PHY_INTERFACE_MODE_SGMII && !phy_interface_mode_is_8023z(phy_mode) && !phy_interface_mode_is_rgmii(phy_mode)) return -EINVAL; return 0; } /* Device initialization routine */ static int mvneta_probe(struct platform_device *pdev) { struct device_node *dn = pdev->dev.of_node; struct device_node *bm_node; struct mvneta_port *pp; struct net_device *dev; struct phylink *phylink; struct phy *comphy; char hw_mac_addr[ETH_ALEN]; phy_interface_t phy_mode; const char *mac_from; int tx_csum_limit; int err; int cpu; dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port), txq_number, rxq_number); if (!dev) return -ENOMEM; dev->tx_queue_len = MVNETA_MAX_TXD; dev->watchdog_timeo = 5 * HZ; dev->netdev_ops = &mvneta_netdev_ops; dev->ethtool_ops = &mvneta_eth_tool_ops; pp = netdev_priv(dev); spin_lock_init(&pp->lock); pp->dn = dn; pp->rxq_def = rxq_def; pp->indir[0] = rxq_def; err = of_get_phy_mode(dn, &phy_mode); if (err) { dev_err(&pdev->dev, "incorrect phy-mode\n"); return err; } pp->phy_interface = phy_mode; comphy = devm_of_phy_get(&pdev->dev, dn, NULL); if (comphy == ERR_PTR(-EPROBE_DEFER)) return -EPROBE_DEFER; if (IS_ERR(comphy)) comphy = NULL; pp->comphy = comphy; pp->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pp->base)) return PTR_ERR(pp->base); /* Get special SoC configurations */ if (of_device_is_compatible(dn, "marvell,armada-3700-neta")) pp->neta_armada3700 = true; if (of_device_is_compatible(dn, "marvell,armada-ac5-neta")) { pp->neta_armada3700 = true; pp->neta_ac5 = true; } dev->irq = irq_of_parse_and_map(dn, 0); if (dev->irq == 0) return -EINVAL; pp->clk = devm_clk_get(&pdev->dev, "core"); if (IS_ERR(pp->clk)) pp->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pp->clk)) { err = PTR_ERR(pp->clk); goto err_free_irq; } clk_prepare_enable(pp->clk); 
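	/* Descriptive note on the step that follows: a missing "bus" clock is
	 * not treated as a probe error; it is only prepared and enabled when
	 * the devm_clk_get() lookup succeeds.
	 */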
pp->clk_bus = devm_clk_get(&pdev->dev, "bus"); if (!IS_ERR(pp->clk_bus)) clk_prepare_enable(pp->clk_bus); pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops; pp->phylink_pcs.neg_mode = true; pp->phylink_config.dev = &dev->dev; pp->phylink_config.type = PHYLINK_NETDEV; pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD; phy_interface_set_rgmii(pp->phylink_config.supported_interfaces); __set_bit(PHY_INTERFACE_MODE_QSGMII, pp->phylink_config.supported_interfaces); if (comphy) { /* If a COMPHY is present, we can support any of the serdes * modes and switch between them. */ __set_bit(PHY_INTERFACE_MODE_SGMII, pp->phylink_config.supported_interfaces); __set_bit(PHY_INTERFACE_MODE_1000BASEX, pp->phylink_config.supported_interfaces); __set_bit(PHY_INTERFACE_MODE_2500BASEX, pp->phylink_config.supported_interfaces); } else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) { /* No COMPHY, with only 2500BASE-X mode supported */ __set_bit(PHY_INTERFACE_MODE_2500BASEX, pp->phylink_config.supported_interfaces); } else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX || phy_mode == PHY_INTERFACE_MODE_SGMII) { /* No COMPHY, we can switch between 1000BASE-X and SGMII */ __set_bit(PHY_INTERFACE_MODE_1000BASEX, pp->phylink_config.supported_interfaces); __set_bit(PHY_INTERFACE_MODE_SGMII, pp->phylink_config.supported_interfaces); } phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode, phy_mode, &mvneta_phylink_ops); if (IS_ERR(phylink)) { err = PTR_ERR(phylink); goto err_clk; } pp->phylink = phylink; /* Alloc per-cpu port structure */ pp->ports = alloc_percpu(struct mvneta_pcpu_port); if (!pp->ports) { err = -ENOMEM; goto err_free_phylink; } /* Alloc per-cpu stats */ pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats); if (!pp->stats) { err = -ENOMEM; goto err_free_ports; } err = of_get_ethdev_address(dn, dev); if (!err) { mac_from = "device tree"; } else { mvneta_get_mac_addr(pp, hw_mac_addr); if (is_valid_ether_addr(hw_mac_addr)) { mac_from = "hardware"; eth_hw_addr_set(dev, hw_mac_addr); } else { mac_from = "random"; eth_hw_addr_random(dev); } } if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) { if (tx_csum_limit < 0 || tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) { tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; dev_info(&pdev->dev, "Wrong TX csum limit in DT, set to %dB\n", MVNETA_TX_CSUM_DEF_SIZE); } } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) { tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; } else { tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE; } pp->tx_csum_limit = tx_csum_limit; pp->dram_target_info = mv_mbus_dram_info(); /* Armada3700 requires setting default configuration of Mbus * windows, however without using filled mbus_dram_target_info * structure. */ if (pp->dram_target_info || pp->neta_armada3700) mvneta_conf_mbus_windows(pp, pp->dram_target_info); pp->tx_ring_size = MVNETA_MAX_TXD; pp->rx_ring_size = MVNETA_MAX_RXD; pp->dev = dev; SET_NETDEV_DEV(dev, &pdev->dev); pp->id = global_port_id++; /* Obtain access to BM resources if enabled and already initialized */ bm_node = of_parse_phandle(dn, "buffer-manager", 0); if (bm_node) { pp->bm_priv = mvneta_bm_get(bm_node); if (pp->bm_priv) { err = mvneta_bm_port_init(pdev, pp); if (err < 0) { dev_info(&pdev->dev, "use SW buffer management\n"); mvneta_bm_put(pp->bm_priv); pp->bm_priv = NULL; } } /* Set RX packet offset correction for platforms, whose * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit * platforms and 0B for 32-bit ones. 
*/ pp->rx_offset_correction = max(0, NET_SKB_PAD - MVNETA_RX_PKT_OFFSET_CORRECTION); } of_node_put(bm_node); /* sw buffer management */ if (!pp->bm_priv) pp->rx_offset_correction = MVNETA_SKB_HEADROOM; err = mvneta_init(&pdev->dev, pp); if (err < 0) goto err_netdev; err = mvneta_port_power_up(pp, pp->phy_interface); if (err < 0) { dev_err(&pdev->dev, "can't power up port\n"); goto err_netdev; } /* Armada3700 network controller does not support per-cpu * operation, so only single NAPI should be initialized. */ if (pp->neta_armada3700) { netif_napi_add(dev, &pp->napi, mvneta_poll); } else { for_each_present_cpu(cpu) { struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); netif_napi_add(dev, &port->napi, mvneta_poll); port->pp = pp; } } dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_RXCSUM; dev->hw_features |= dev->features; dev->vlan_features |= dev->features; if (!pp->bm_priv) dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG | NETDEV_XDP_ACT_NDO_XMIT_SG; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; netif_set_tso_max_segs(dev, MVNETA_MAX_TSO_SEGS); /* MTU range: 68 - 9676 */ dev->min_mtu = ETH_MIN_MTU; /* 9676 == 9700 - 20 and rounding to 8 */ dev->max_mtu = 9676; err = register_netdev(dev); if (err < 0) { dev_err(&pdev->dev, "failed to register\n"); goto err_netdev; } netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); platform_set_drvdata(pdev, pp->dev); return 0; err_netdev: if (pp->bm_priv) { mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); mvneta_bm_put(pp->bm_priv); } free_percpu(pp->stats); err_free_ports: free_percpu(pp->ports); err_free_phylink: if (pp->phylink) phylink_destroy(pp->phylink); err_clk: clk_disable_unprepare(pp->clk_bus); clk_disable_unprepare(pp->clk); err_free_irq: irq_dispose_mapping(dev->irq); return err; } /* Device removal routine */ static int mvneta_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct mvneta_port *pp = netdev_priv(dev); unregister_netdev(dev); clk_disable_unprepare(pp->clk_bus); clk_disable_unprepare(pp->clk); free_percpu(pp->ports); free_percpu(pp->stats); irq_dispose_mapping(dev->irq); phylink_destroy(pp->phylink); if (pp->bm_priv) { mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); mvneta_bm_put(pp->bm_priv); } return 0; } #ifdef CONFIG_PM_SLEEP static int mvneta_suspend(struct device *device) { int queue; struct net_device *dev = dev_get_drvdata(device); struct mvneta_port *pp = netdev_priv(dev); if (!netif_running(dev)) goto clean_exit; if (!pp->neta_armada3700) { spin_lock(&pp->lock); pp->is_stopped = true; spin_unlock(&pp->lock); cpuhp_state_remove_instance_nocalls(online_hpstate, &pp->node_online); cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, &pp->node_dead); } rtnl_lock(); mvneta_stop_dev(pp); rtnl_unlock(); for (queue = 0; queue < rxq_number; queue++) { struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; mvneta_rxq_drop_pkts(pp, rxq); } for (queue = 0; queue < txq_number; queue++) { struct mvneta_tx_queue *txq = &pp->txqs[queue]; mvneta_txq_hw_deinit(pp, txq); } clean_exit: netif_device_detach(dev); clk_disable_unprepare(pp->clk_bus); clk_disable_unprepare(pp->clk); return 0; } static int mvneta_resume(struct device *device) { struct platform_device *pdev = to_platform_device(device); 
struct net_device *dev = dev_get_drvdata(device); struct mvneta_port *pp = netdev_priv(dev); int err, queue; clk_prepare_enable(pp->clk); if (!IS_ERR(pp->clk_bus)) clk_prepare_enable(pp->clk_bus); if (pp->dram_target_info || pp->neta_armada3700) mvneta_conf_mbus_windows(pp, pp->dram_target_info); if (pp->bm_priv) { err = mvneta_bm_port_init(pdev, pp); if (err < 0) { dev_info(&pdev->dev, "use SW buffer management\n"); pp->rx_offset_correction = MVNETA_SKB_HEADROOM; pp->bm_priv = NULL; } } mvneta_defaults_set(pp); err = mvneta_port_power_up(pp, pp->phy_interface); if (err < 0) { dev_err(device, "can't power up port\n"); return err; } netif_device_attach(dev); if (!netif_running(dev)) return 0; for (queue = 0; queue < rxq_number; queue++) { struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; rxq->next_desc_to_proc = 0; mvneta_rxq_hw_init(pp, rxq); } for (queue = 0; queue < txq_number; queue++) { struct mvneta_tx_queue *txq = &pp->txqs[queue]; txq->next_desc_to_proc = 0; mvneta_txq_hw_init(pp, txq); } if (!pp->neta_armada3700) { spin_lock(&pp->lock); pp->is_stopped = false; spin_unlock(&pp->lock); cpuhp_state_add_instance_nocalls(online_hpstate, &pp->node_online); cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD, &pp->node_dead); } rtnl_lock(); mvneta_start_dev(pp); rtnl_unlock(); mvneta_set_rx_mode(dev); return 0; } #endif static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume); static const struct of_device_id mvneta_match[] = { { .compatible = "marvell,armada-370-neta" }, { .compatible = "marvell,armada-xp-neta" }, { .compatible = "marvell,armada-3700-neta" }, { .compatible = "marvell,armada-ac5-neta" }, { } }; MODULE_DEVICE_TABLE(of, mvneta_match); static struct platform_driver mvneta_driver = { .probe = mvneta_probe, .remove = mvneta_remove, .driver = { .name = MVNETA_DRIVER_NAME, .of_match_table = mvneta_match, .pm = &mvneta_pm_ops, }, }; static int __init mvneta_driver_init(void) { int ret; BUILD_BUG_ON_NOT_POWER_OF_2(MVNETA_TSO_PER_PAGE); ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online", mvneta_cpu_online, mvneta_cpu_down_prepare); if (ret < 0) goto out; online_hpstate = ret; ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead", NULL, mvneta_cpu_dead); if (ret) goto err_dead; ret = platform_driver_register(&mvneta_driver); if (ret) goto err; return 0; err: cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD); err_dead: cpuhp_remove_multi_state(online_hpstate); out: return ret; } module_init(mvneta_driver_init); static void __exit mvneta_driver_exit(void) { platform_driver_unregister(&mvneta_driver); cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD); cpuhp_remove_multi_state(online_hpstate); } module_exit(mvneta_driver_exit); MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com"); MODULE_AUTHOR("Rami Rosen <[email protected]>, Thomas Petazzoni <[email protected]>"); MODULE_LICENSE("GPL"); module_param(rxq_number, int, 0444); module_param(txq_number, int, 0444); module_param(rxq_def, int, 0444); module_param(rx_copybreak, int, 0644);
linux-master
drivers/net/ethernet/marvell/mvneta.c
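The per-queue TX shaping path in the file above (mvneta_enable_per_queue_rate_limit() and mvneta_setup_queue_rates()) reduces to two integer computations: the number of core-clock cycles in one bucket refill period, and the number of rate-resolution tokens added per refill. The standalone sketch below mirrors that arithmetic outside the kernel. It is only a sketch: compute_refill() is a hypothetical helper name, the hardware constants are passed in as parameters because their real values live in the driver's register definitions and are not reproduced here, and the numbers in main() are purely illustrative.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the bucket-refill arithmetic used by the driver's per-queue
 * rate limiting. Returns 0 on success and fills in the two register
 * values, or -1 when the requested rate cannot be represented.
 */
static int compute_refill(uint64_t core_clk_hz, uint64_t max_rate_bytes_per_s,
			  uint32_t base_period_ns, uint64_t rate_resolution_bps,
			  uint32_t max_refill_clk, uint32_t max_refill_val,
			  uint32_t *refill_cycles, uint32_t *refill_val)
{
	uint64_t max_rate_bps = max_rate_bytes_per_s * 8;	/* Bps -> bps */
	uint64_t ns_per_cycle, val;

	if (!core_clk_hz || !rate_resolution_bps)
		return -1;

	ns_per_cycle = 1000000000ULL / core_clk_hz;
	if (!ns_per_cycle)
		return -1;

	/* Core-clock cycles that make up one base refill period. */
	*refill_cycles = (uint32_t)(base_period_ns / ns_per_cycle);
	if (*refill_cycles > max_refill_clk)
		return -1;

	/* Tokens added per refill period; the rate must divide evenly
	 * by the resolution and the result must fit the register field.
	 */
	if (max_rate_bps % rate_resolution_bps)
		return -1;
	val = max_rate_bps / rate_resolution_bps;
	if (!val || val > max_refill_val)
		return -1;
	*refill_val = (uint32_t)val;

	return 0;
}

int main(void)
{
	uint32_t cycles, val;

	/* Illustrative inputs only: 250 MHz core clock, 100 Mbit/s cap. */
	if (!compute_refill(250000000ULL, 12500000ULL, 10000, 10000,
			    0xffff, 0xffff, &cycles, &val))
		printf("refill_cycles=%u refill_val=%u\n",
		       (unsigned)cycles, (unsigned)val);
	return 0;
}

A rate that does not divide evenly by the resolution, or that overflows either register field, is rejected, which is the same behaviour as the -EINVAL paths in the driver code above.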
/* * Driver for Marvell NETA network controller Buffer Manager. * * Copyright (C) 2015 Marvell * * Marcin Wojtas <[email protected]> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/clk.h> #include <linux/genalloc.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/mbus.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/skbuff.h> #include <net/hwbm.h> #include "mvneta_bm.h" #define MVNETA_BM_DRIVER_NAME "mvneta_bm" #define MVNETA_BM_DRIVER_VERSION "1.0" static void mvneta_bm_write(struct mvneta_bm *priv, u32 offset, u32 data) { writel(data, priv->reg_base + offset); } static u32 mvneta_bm_read(struct mvneta_bm *priv, u32 offset) { return readl(priv->reg_base + offset); } static void mvneta_bm_pool_enable(struct mvneta_bm *priv, int pool_id) { u32 val; val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id)); val |= MVNETA_BM_POOL_ENABLE_MASK; mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val); /* Clear BM cause register */ mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0); } static void mvneta_bm_pool_disable(struct mvneta_bm *priv, int pool_id) { u32 val; val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id)); val &= ~MVNETA_BM_POOL_ENABLE_MASK; mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val); } static inline void mvneta_bm_config_set(struct mvneta_bm *priv, u32 mask) { u32 val; val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG); val |= mask; mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val); } static inline void mvneta_bm_config_clear(struct mvneta_bm *priv, u32 mask) { u32 val; val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG); val &= ~mask; mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val); } static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id, u8 target_id, u8 attr) { u32 val; val = mvneta_bm_read(priv, MVNETA_BM_XBAR_POOL_REG(pool_id)); val &= ~MVNETA_BM_TARGET_ID_MASK(pool_id); val &= ~MVNETA_BM_XBAR_ATTR_MASK(pool_id); val |= MVNETA_BM_TARGET_ID_VAL(pool_id, target_id); val |= MVNETA_BM_XBAR_ATTR_VAL(pool_id, attr); mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val); } int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) { struct mvneta_bm_pool *bm_pool = (struct mvneta_bm_pool *)hwbm_pool->priv; struct mvneta_bm *priv = bm_pool->priv; dma_addr_t phys_addr; /* In order to update buf_cookie field of RX descriptor properly, * BM hardware expects buf virtual address to be placed in the * first four bytes of mapped buffer. 
*/ *(u32 *)buf = (u32)buf; phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr))) return -ENOMEM; mvneta_bm_pool_put_bp(priv, bm_pool, phys_addr); return 0; } EXPORT_SYMBOL_GPL(mvneta_bm_construct); /* Create pool */ static int mvneta_bm_pool_create(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool) { struct platform_device *pdev = priv->pdev; u8 target_id, attr; int size_bytes, err; size_bytes = sizeof(u32) * bm_pool->hwbm_pool.size; bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes, &bm_pool->phys_addr, GFP_KERNEL); if (!bm_pool->virt_addr) return -ENOMEM; if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVNETA_BM_POOL_PTR_ALIGN)) { dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr, bm_pool->phys_addr); dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", bm_pool->id, MVNETA_BM_POOL_PTR_ALIGN); return -ENOMEM; } err = mvebu_mbus_get_dram_win_info(bm_pool->phys_addr, &target_id, &attr); if (err < 0) { dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr, bm_pool->phys_addr); return err; } /* Set pool address */ mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(bm_pool->id), bm_pool->phys_addr); mvneta_bm_pool_target_set(priv, bm_pool->id, target_id, attr); mvneta_bm_pool_enable(priv, bm_pool->id); return 0; } /* Notify the driver that BM pool is being used as specific type and return the * pool pointer on success */ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, enum mvneta_bm_type type, u8 port_id, int pkt_size) { struct mvneta_bm_pool *new_pool = &priv->bm_pools[pool_id]; int num, err; if (new_pool->type == MVNETA_BM_LONG && new_pool->port_map != 1 << port_id) { dev_err(&priv->pdev->dev, "long pool cannot be shared by the ports\n"); return NULL; } if (new_pool->type == MVNETA_BM_SHORT && new_pool->type != type) { dev_err(&priv->pdev->dev, "mixing pools' types between the ports is forbidden\n"); return NULL; } if (new_pool->pkt_size == 0 || type != MVNETA_BM_SHORT) new_pool->pkt_size = pkt_size; /* Allocate buffers in case BM pool hasn't been used yet */ if (new_pool->type == MVNETA_BM_FREE) { struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool; new_pool->priv = priv; new_pool->type = type; new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size); hwbm_pool->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); hwbm_pool->construct = mvneta_bm_construct; hwbm_pool->priv = new_pool; mutex_init(&hwbm_pool->buf_lock); /* Create new pool */ err = mvneta_bm_pool_create(priv, new_pool); if (err) { dev_err(&priv->pdev->dev, "fail to create pool %d\n", new_pool->id); return NULL; } /* Allocate buffers for this pool */ num = hwbm_pool_add(hwbm_pool, hwbm_pool->size); if (num != hwbm_pool->size) { WARN(1, "pool %d: %d of %d allocated\n", new_pool->id, num, hwbm_pool->size); return NULL; } } return new_pool; } EXPORT_SYMBOL_GPL(mvneta_bm_pool_use); /* Free all buffers from the pool */ void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, u8 port_map) { int i; bm_pool->port_map &= ~port_map; if (bm_pool->port_map) return; mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK); for (i = 0; i < bm_pool->hwbm_pool.buf_num; i++) { dma_addr_t buf_phys_addr; u32 *vaddr; /* Get buffer physical address (indirect access) */ buf_phys_addr = mvneta_bm_pool_get_bp(priv, bm_pool); /* Work-around to the problems when destroying the pool, * when it occurs that a read 
access to BPPI returns 0. */ if (buf_phys_addr == 0) continue; vaddr = phys_to_virt(buf_phys_addr); if (!vaddr) break; dma_unmap_single(&priv->pdev->dev, buf_phys_addr, bm_pool->buf_size, DMA_FROM_DEVICE); hwbm_buf_free(&bm_pool->hwbm_pool, vaddr); } mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK); /* Update BM driver with number of buffers removed from pool */ bm_pool->hwbm_pool.buf_num -= i; } EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free); /* Cleanup pool */ void mvneta_bm_pool_destroy(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, u8 port_map) { struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool; bm_pool->port_map &= ~port_map; if (bm_pool->port_map) return; bm_pool->type = MVNETA_BM_FREE; mvneta_bm_bufs_free(priv, bm_pool, port_map); if (hwbm_pool->buf_num) WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); if (bm_pool->virt_addr) { dma_free_coherent(&priv->pdev->dev, sizeof(u32) * hwbm_pool->size, bm_pool->virt_addr, bm_pool->phys_addr); bm_pool->virt_addr = NULL; } mvneta_bm_pool_disable(priv, bm_pool->id); } EXPORT_SYMBOL_GPL(mvneta_bm_pool_destroy); static void mvneta_bm_pools_init(struct mvneta_bm *priv) { struct device_node *dn = priv->pdev->dev.of_node; struct mvneta_bm_pool *bm_pool; char prop[15]; u32 size; int i; /* Activate BM unit */ mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_START_MASK); /* Create all pools with maximum size */ for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) { bm_pool = &priv->bm_pools[i]; bm_pool->id = i; bm_pool->type = MVNETA_BM_FREE; /* Reset read pointer */ mvneta_bm_write(priv, MVNETA_BM_POOL_READ_PTR_REG(i), 0); /* Reset write pointer */ mvneta_bm_write(priv, MVNETA_BM_POOL_WRITE_PTR_REG(i), 0); /* Configure pool size according to DT or use default value */ sprintf(prop, "pool%d,capacity", i); if (of_property_read_u32(dn, prop, &size)) { size = MVNETA_BM_POOL_CAP_DEF; } else if (size > MVNETA_BM_POOL_CAP_MAX) { dev_warn(&priv->pdev->dev, "Illegal pool %d capacity %d, set to %d\n", i, size, MVNETA_BM_POOL_CAP_MAX); size = MVNETA_BM_POOL_CAP_MAX; } else if (size < MVNETA_BM_POOL_CAP_MIN) { dev_warn(&priv->pdev->dev, "Illegal pool %d capacity %d, set to %d\n", i, size, MVNETA_BM_POOL_CAP_MIN); size = MVNETA_BM_POOL_CAP_MIN; } else if (!IS_ALIGNED(size, MVNETA_BM_POOL_CAP_ALIGN)) { dev_warn(&priv->pdev->dev, "Illegal pool %d capacity %d, round to %d\n", i, size, ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN)); size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN); } bm_pool->hwbm_pool.size = size; mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i), bm_pool->hwbm_pool.size); /* Obtain custom pkt_size from DT */ sprintf(prop, "pool%d,pkt-size", i); if (of_property_read_u32(dn, prop, &bm_pool->pkt_size)) bm_pool->pkt_size = 0; } } static void mvneta_bm_default_set(struct mvneta_bm *priv) { u32 val; /* Mask BM all interrupts */ mvneta_bm_write(priv, MVNETA_BM_INTR_MASK_REG, 0); /* Clear BM cause register */ mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0); /* Set BM configuration register */ val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG); /* Reduce MaxInBurstSize from 32 BPs to 16 BPs */ val &= ~MVNETA_BM_MAX_IN_BURST_SIZE_MASK; val |= MVNETA_BM_MAX_IN_BURST_SIZE_16BP; mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val); } static int mvneta_bm_init(struct mvneta_bm *priv) { mvneta_bm_default_set(priv); /* Allocate and initialize BM pools structures */ priv->bm_pools = devm_kcalloc(&priv->pdev->dev, MVNETA_BM_POOLS_NUM, sizeof(struct mvneta_bm_pool), GFP_KERNEL); if (!priv->bm_pools) return -ENOMEM; mvneta_bm_pools_init(priv); return 0; } static int 
mvneta_bm_get_sram(struct device_node *dn, struct mvneta_bm *priv) { priv->bppi_pool = of_gen_pool_get(dn, "internal-mem", 0); if (!priv->bppi_pool) return -ENOMEM; priv->bppi_virt_addr = gen_pool_dma_alloc(priv->bppi_pool, MVNETA_BM_BPPI_SIZE, &priv->bppi_phys_addr); if (!priv->bppi_virt_addr) return -ENOMEM; return 0; } static void mvneta_bm_put_sram(struct mvneta_bm *priv) { gen_pool_free(priv->bppi_pool, priv->bppi_phys_addr, MVNETA_BM_BPPI_SIZE); } struct mvneta_bm *mvneta_bm_get(struct device_node *node) { struct platform_device *pdev = of_find_device_by_node(node); return pdev ? platform_get_drvdata(pdev) : NULL; } EXPORT_SYMBOL_GPL(mvneta_bm_get); void mvneta_bm_put(struct mvneta_bm *priv) { platform_device_put(priv->pdev); } EXPORT_SYMBOL_GPL(mvneta_bm_put); static int mvneta_bm_probe(struct platform_device *pdev) { struct device_node *dn = pdev->dev.of_node; struct mvneta_bm *priv; int err; priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL); if (!priv) return -ENOMEM; priv->reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->reg_base)) return PTR_ERR(priv->reg_base); priv->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(priv->clk)) return PTR_ERR(priv->clk); err = clk_prepare_enable(priv->clk); if (err < 0) return err; err = mvneta_bm_get_sram(dn, priv); if (err < 0) { dev_err(&pdev->dev, "failed to allocate internal memory\n"); goto err_clk; } priv->pdev = pdev; /* Initialize buffer manager internals */ err = mvneta_bm_init(priv); if (err < 0) { dev_err(&pdev->dev, "failed to initialize controller\n"); goto err_sram; } dn->data = priv; platform_set_drvdata(pdev, priv); dev_info(&pdev->dev, "Buffer Manager for network controller enabled\n"); return 0; err_sram: mvneta_bm_put_sram(priv); err_clk: clk_disable_unprepare(priv->clk); return err; } static int mvneta_bm_remove(struct platform_device *pdev) { struct mvneta_bm *priv = platform_get_drvdata(pdev); u8 all_ports_map = 0xff; int i = 0; for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) { struct mvneta_bm_pool *bm_pool = &priv->bm_pools[i]; mvneta_bm_pool_destroy(priv, bm_pool, all_ports_map); } mvneta_bm_put_sram(priv); /* Dectivate BM unit */ mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK); clk_disable_unprepare(priv->clk); return 0; } static const struct of_device_id mvneta_bm_match[] = { { .compatible = "marvell,armada-380-neta-bm" }, { } }; MODULE_DEVICE_TABLE(of, mvneta_bm_match); static struct platform_driver mvneta_bm_driver = { .probe = mvneta_bm_probe, .remove = mvneta_bm_remove, .driver = { .name = MVNETA_BM_DRIVER_NAME, .of_match_table = mvneta_bm_match, }, }; module_platform_driver(mvneta_bm_driver); MODULE_DESCRIPTION("Marvell NETA Buffer Manager Driver - www.marvell.com"); MODULE_AUTHOR("Marcin Wojtas <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/ethernet/marvell/mvneta_bm.c
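mvneta_bm_pools_init() in the file above sanitises each pool capacity read from the device tree by clamping it into the supported window and then rounding it up to the required alignment. The sketch below is a minimal userspace rendering of that else-if chain, under stated assumptions: sanitise_pool_capacity() is a hypothetical name, the MIN/MAX/ALIGN limits are parameters rather than the real MVNETA_BM_POOL_CAP_* values, and the inputs in main() are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Clamp a requested pool capacity into [cap_min, cap_max] and round an
 * in-range but misaligned value up to the next multiple of cap_align,
 * mirroring the order of checks in mvneta_bm_pools_init().
 */
static uint32_t sanitise_pool_capacity(uint32_t size, uint32_t cap_min,
				       uint32_t cap_max, uint32_t cap_align)
{
	if (!cap_align)
		return size;

	if (size > cap_max)
		size = cap_max;
	else if (size < cap_min)
		size = cap_min;
	else if (size % cap_align)
		size = ((size + cap_align - 1) / cap_align) * cap_align;

	return size;
}

int main(void)
{
	/* Illustrative limits, not the real MVNETA_BM_POOL_CAP_* values. */
	printf("%u\n", (unsigned)sanitise_pool_capacity(1000, 128, 16384, 32));  /* 1024 */
	printf("%u\n", (unsigned)sanitise_pool_capacity(50000, 128, 16384, 32)); /* 16384 */
	return 0;
}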
// SPDX-License-Identifier: GPL-2.0-only /* * New driver for Marvell Yukon chipset and SysKonnect Gigabit * Ethernet adapters. Based on earlier sk98lin, e100 and * FreeBSD if_sk drivers. * * This driver intentionally does not support all the features * of the original driver such as link fail-over and link management because * those should be done at higher levels. * * Copyright (C) 2004, 2005 Stephen Hemminger <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/in.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/pci.h> #include <linux/if_vlan.h> #include <linux/ip.h> #include <linux/delay.h> #include <linux/crc32.h> #include <linux/dma-mapping.h> #include <linux/debugfs.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/mii.h> #include <linux/slab.h> #include <linux/dmi.h> #include <linux/prefetch.h> #include <asm/irq.h> #include "skge.h" #define DRV_NAME "skge" #define DRV_VERSION "1.14" #define DEFAULT_TX_RING_SIZE 128 #define DEFAULT_RX_RING_SIZE 512 #define MAX_TX_RING_SIZE 1024 #define TX_LOW_WATER (MAX_SKB_FRAGS + 1) #define MAX_RX_RING_SIZE 4096 #define RX_COPY_THRESHOLD 128 #define RX_BUF_SIZE 1536 #define PHY_RETRIES 1000 #define ETH_JUMBO_MTU 9000 #define TX_WATCHDOG (5 * HZ) #define BLINK_MS 250 #define LINK_HZ HZ #define SKGE_EEPROM_MAGIC 0x9933aabb MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver"); MODULE_AUTHOR("Stephen Hemminger <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); static const u32 default_msg = (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN); static int debug = -1; /* defaults above */ module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); static const struct pci_device_id skge_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x1700) }, /* 3Com 3C940 */ { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x80EB) }, /* 3Com 3C940B */ #ifdef CONFIG_SKGE_GENESIS { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4300) }, /* SK-9xx */ #endif { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4320) }, /* SK-98xx V2.0 */ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* D-Link DGE-530T (rev.B) */ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4c00) }, /* D-Link DGE-530T */ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302) }, /* D-Link DGE-530T Rev C1 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, /* Marvell Yukon 88E8001/8003/8010 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */ { PCI_DEVICE(PCI_VENDOR_ID_CNET, 0x434E) }, /* CNet PowerG-2000 */ { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, 0x1064) }, /* Linksys EG1064 v2 */ { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 }, /* Linksys EG1032 v2 */ { 0 } }; MODULE_DEVICE_TABLE(pci, skge_id_table); static int skge_up(struct net_device *dev); static int skge_down(struct net_device *dev); static void skge_phy_reset(struct skge_port *skge); static void skge_tx_clean(struct net_device *dev); static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); static void genesis_get_stats(struct skge_port *skge, u64 *data); static void yukon_get_stats(struct skge_port *skge, u64 *data); static void yukon_init(struct skge_hw *hw, int port); static void genesis_mac_init(struct skge_hw *hw, int port); static void genesis_link_up(struct skge_port *skge); static void skge_set_multicast(struct net_device 
*dev); static irqreturn_t skge_intr(int irq, void *dev_id); /* Avoid conditionals by using array */ static const int txqaddr[] = { Q_XA1, Q_XA2 }; static const int rxqaddr[] = { Q_R1, Q_R2 }; static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F }; static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F }; static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 }; static inline bool is_genesis(const struct skge_hw *hw) { #ifdef CONFIG_SKGE_GENESIS return hw->chip_id == CHIP_ID_GENESIS; #else return false; #endif } static int skge_get_regs_len(struct net_device *dev) { return 0x4000; } /* * Returns copy of whole control register region * Note: skip RAM address register because accessing it will * cause bus hangs! */ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { const struct skge_port *skge = netdev_priv(dev); const void __iomem *io = skge->hw->regs; regs->version = 1; memset(p, 0, regs->len); memcpy_fromio(p, io, B3_RAM_ADDR); if (regs->len > B3_RI_WTO_R1) { memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, regs->len - B3_RI_WTO_R1); } } /* Wake on Lan only supported on Yukon chips with rev 1 or above */ static u32 wol_supported(const struct skge_hw *hw) { if (is_genesis(hw)) return 0; if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0) return 0; return WAKE_MAGIC | WAKE_PHY; } static void skge_wol_init(struct skge_port *skge) { struct skge_hw *hw = skge->hw; int port = skge->port; u16 ctrl; skge_write16(hw, B0_CTST, CS_RST_CLR); skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR); /* Turn on Vaux */ skge_write8(hw, B0_POWER_CTRL, PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF); /* WA code for COMA mode -- clear PHY reset */ if (hw->chip_id == CHIP_ID_YUKON_LITE && hw->chip_rev >= CHIP_REV_YU_LITE_A3) { u32 reg = skge_read32(hw, B2_GP_IO); reg |= GP_DIR_9; reg &= ~GP_IO_9; skge_write32(hw, B2_GP_IO, reg); } skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_DIS_SLEEP | GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 | GPC_ANEG_1 | GPC_RST_SET); skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_DIS_SLEEP | GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 | GPC_ANEG_1 | GPC_RST_CLR); skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); /* Force to 10/100 skge_reset will re-enable on resume */ gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, (PHY_AN_100FULL | PHY_AN_100HALF | PHY_AN_10FULL | PHY_AN_10HALF | PHY_AN_CSMA)); /* no 1000 HD/FD */ gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0); gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_RESET | PHY_CT_SPS_LSB | PHY_CT_ANE | PHY_CT_RE_CFG | PHY_CT_DUP_MD); /* Set GMAC to no flow control and auto update for speed/duplex */ gma_write16(hw, port, GM_GP_CTRL, GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA| GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS); /* Set WOL address */ memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR), skge->netdev->dev_addr, ETH_ALEN); /* Turn on appropriate WOL control bits */ skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT); ctrl = 0; if (skge->wol & WAKE_PHY) ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT; else ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT; if (skge->wol & WAKE_MAGIC) ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT; else ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT; ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT; skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl); /* 
block receiver */ skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); } static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct skge_port *skge = netdev_priv(dev); wol->supported = wol_supported(skge->hw); wol->wolopts = skge->wol; } static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; if ((wol->wolopts & ~wol_supported(hw)) || !device_can_wakeup(&hw->pdev->dev)) return -EOPNOTSUPP; skge->wol = wol->wolopts; device_set_wakeup_enable(&hw->pdev->dev, skge->wol); return 0; } /* Determine supported/advertised modes based on hardware. * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx */ static u32 skge_supported_modes(const struct skge_hw *hw) { u32 supported; if (hw->copper) { supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP); if (is_genesis(hw)) supported &= ~(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full); else if (hw->chip_id == CHIP_ID_YUKON) supported &= ~SUPPORTED_1000baseT_Half; } else supported = (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_FIBRE | SUPPORTED_Autoneg); return supported; } static int skge_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; u32 supported, advertising; supported = skge_supported_modes(hw); if (hw->copper) { cmd->base.port = PORT_TP; cmd->base.phy_address = hw->phy_addr; } else cmd->base.port = PORT_FIBRE; advertising = skge->advertising; cmd->base.autoneg = skge->autoneg; cmd->base.speed = skge->speed; cmd->base.duplex = skge->duplex; ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); return 0; } static int skge_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct skge_port *skge = netdev_priv(dev); const struct skge_hw *hw = skge->hw; u32 supported = skge_supported_modes(hw); int err = 0; u32 advertising; ethtool_convert_link_mode_to_legacy_u32(&advertising, cmd->link_modes.advertising); if (cmd->base.autoneg == AUTONEG_ENABLE) { advertising = supported; skge->duplex = -1; skge->speed = -1; } else { u32 setting; u32 speed = cmd->base.speed; switch (speed) { case SPEED_1000: if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_1000baseT_Full; else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_1000baseT_Half; else return -EINVAL; break; case SPEED_100: if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_100baseT_Full; else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_100baseT_Half; else return -EINVAL; break; case SPEED_10: if (cmd->base.duplex == DUPLEX_FULL) setting = SUPPORTED_10baseT_Full; else if (cmd->base.duplex == DUPLEX_HALF) setting = SUPPORTED_10baseT_Half; else return -EINVAL; break; default: return -EINVAL; } if ((setting & supported) == 0) return -EINVAL; skge->speed = speed; skge->duplex = cmd->base.duplex; } skge->autoneg = cmd->base.autoneg; skge->advertising = advertising; if (netif_running(dev)) { skge_down(dev); err = skge_up(dev); if (err) { dev_close(dev); return err; } } return 0; } static void skge_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct skge_port *skge = 
netdev_priv(dev); strscpy(info->driver, DRV_NAME, sizeof(info->driver)); strscpy(info->version, DRV_VERSION, sizeof(info->version)); strscpy(info->bus_info, pci_name(skge->hw->pdev), sizeof(info->bus_info)); } static const struct skge_stat { char name[ETH_GSTRING_LEN]; u16 xmac_offset; u16 gma_offset; } skge_stats[] = { { "tx_bytes", XM_TXO_OK_HI, GM_TXO_OK_HI }, { "rx_bytes", XM_RXO_OK_HI, GM_RXO_OK_HI }, { "tx_broadcast", XM_TXF_BC_OK, GM_TXF_BC_OK }, { "rx_broadcast", XM_RXF_BC_OK, GM_RXF_BC_OK }, { "tx_multicast", XM_TXF_MC_OK, GM_TXF_MC_OK }, { "rx_multicast", XM_RXF_MC_OK, GM_RXF_MC_OK }, { "tx_unicast", XM_TXF_UC_OK, GM_TXF_UC_OK }, { "rx_unicast", XM_RXF_UC_OK, GM_RXF_UC_OK }, { "tx_mac_pause", XM_TXF_MPAUSE, GM_TXF_MPAUSE }, { "rx_mac_pause", XM_RXF_MPAUSE, GM_RXF_MPAUSE }, { "collisions", XM_TXF_SNG_COL, GM_TXF_SNG_COL }, { "multi_collisions", XM_TXF_MUL_COL, GM_TXF_MUL_COL }, { "aborted", XM_TXF_ABO_COL, GM_TXF_ABO_COL }, { "late_collision", XM_TXF_LAT_COL, GM_TXF_LAT_COL }, { "fifo_underrun", XM_TXE_FIFO_UR, GM_TXE_FIFO_UR }, { "fifo_overflow", XM_RXE_FIFO_OV, GM_RXE_FIFO_OV }, { "rx_toolong", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR }, { "rx_jabber", XM_RXF_JAB_PKT, GM_RXF_JAB_PKT }, { "rx_runt", XM_RXE_RUNT, GM_RXE_FRAG }, { "rx_too_long", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR }, { "rx_fcs_error", XM_RXF_FCS_ERR, GM_RXF_FCS_ERR }, }; static int skge_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(skge_stats); default: return -EOPNOTSUPP; } } static void skge_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct skge_port *skge = netdev_priv(dev); if (is_genesis(skge->hw)) genesis_get_stats(skge, data); else yukon_get_stats(skge, data); } /* Use hardware MIB variables for critical path statistics and * transmit feedback not reported at interrupt. * Other errors are accounted for in interrupt handler. 
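 *
 * For reference, based on the skge_stats[] table above and the code below:
 * data[0]/data[1] hold the 64-bit Tx/Rx octet counters, data[2..7] are the
 * broadcast/multicast/unicast frame counters summed into tx_packets and
 * rx_packets, and data[10]/data[12] supply the collision and aborted-frame
 * counts.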
*/ static struct net_device_stats *skge_get_stats(struct net_device *dev) { struct skge_port *skge = netdev_priv(dev); u64 data[ARRAY_SIZE(skge_stats)]; if (is_genesis(skge->hw)) genesis_get_stats(skge, data); else yukon_get_stats(skge, data); dev->stats.tx_bytes = data[0]; dev->stats.rx_bytes = data[1]; dev->stats.tx_packets = data[2] + data[4] + data[6]; dev->stats.rx_packets = data[3] + data[5] + data[7]; dev->stats.multicast = data[3] + data[5]; dev->stats.collisions = data[10]; dev->stats.tx_aborted_errors = data[12]; return &dev->stats; } static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data) { int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(skge_stats); i++) memcpy(data + i * ETH_GSTRING_LEN, skge_stats[i].name, ETH_GSTRING_LEN); break; } } static void skge_get_ring_param(struct net_device *dev, struct ethtool_ringparam *p, struct kernel_ethtool_ringparam *kernel_p, struct netlink_ext_ack *extack) { struct skge_port *skge = netdev_priv(dev); p->rx_max_pending = MAX_RX_RING_SIZE; p->tx_max_pending = MAX_TX_RING_SIZE; p->rx_pending = skge->rx_ring.count; p->tx_pending = skge->tx_ring.count; } static int skge_set_ring_param(struct net_device *dev, struct ethtool_ringparam *p, struct kernel_ethtool_ringparam *kernel_p, struct netlink_ext_ack *extack) { struct skge_port *skge = netdev_priv(dev); int err = 0; if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE || p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE) return -EINVAL; skge->rx_ring.count = p->rx_pending; skge->tx_ring.count = p->tx_pending; if (netif_running(dev)) { skge_down(dev); err = skge_up(dev); if (err) dev_close(dev); } return err; } static u32 skge_get_msglevel(struct net_device *netdev) { struct skge_port *skge = netdev_priv(netdev); return skge->msg_enable; } static void skge_set_msglevel(struct net_device *netdev, u32 value) { struct skge_port *skge = netdev_priv(netdev); skge->msg_enable = value; } static int skge_nway_reset(struct net_device *dev) { struct skge_port *skge = netdev_priv(dev); if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev)) return -EINVAL; skge_phy_reset(skge); return 0; } static void skge_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *ecmd) { struct skge_port *skge = netdev_priv(dev); ecmd->rx_pause = ((skge->flow_control == FLOW_MODE_SYMMETRIC) || (skge->flow_control == FLOW_MODE_SYM_OR_REM)); ecmd->tx_pause = (ecmd->rx_pause || (skge->flow_control == FLOW_MODE_LOC_SEND)); ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause; } static int skge_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *ecmd) { struct skge_port *skge = netdev_priv(dev); struct ethtool_pauseparam old; int err = 0; skge_get_pauseparam(dev, &old); if (ecmd->autoneg != old.autoneg) skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC; else { if (ecmd->rx_pause && ecmd->tx_pause) skge->flow_control = FLOW_MODE_SYMMETRIC; else if (ecmd->rx_pause && !ecmd->tx_pause) skge->flow_control = FLOW_MODE_SYM_OR_REM; else if (!ecmd->rx_pause && ecmd->tx_pause) skge->flow_control = FLOW_MODE_LOC_SEND; else skge->flow_control = FLOW_MODE_NONE; } if (netif_running(dev)) { skge_down(dev); err = skge_up(dev); if (err) { dev_close(dev); return err; } } return 0; } /* Chip internal frequency for clock calculations */ static inline u32 hwkhz(const struct skge_hw *hw) { return is_genesis(hw) ? 
53125 : 78125; } /* Chip HZ to microseconds */ static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks) { return (ticks * 1000) / hwkhz(hw); } /* Microseconds to chip HZ */ static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec) { return hwkhz(hw) * usec / 1000; } static int skge_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ecmd, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; int port = skge->port; ecmd->rx_coalesce_usecs = 0; ecmd->tx_coalesce_usecs = 0; if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) { u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI)); u32 msk = skge_read32(hw, B2_IRQM_MSK); if (msk & rxirqmask[port]) ecmd->rx_coalesce_usecs = delay; if (msk & txirqmask[port]) ecmd->tx_coalesce_usecs = delay; } return 0; } /* Note: interrupt timer is per board, but can turn on/off per port */ static int skge_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ecmd, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; int port = skge->port; u32 msk = skge_read32(hw, B2_IRQM_MSK); u32 delay = 25; if (ecmd->rx_coalesce_usecs == 0) msk &= ~rxirqmask[port]; else if (ecmd->rx_coalesce_usecs < 25 || ecmd->rx_coalesce_usecs > 33333) return -EINVAL; else { msk |= rxirqmask[port]; delay = ecmd->rx_coalesce_usecs; } if (ecmd->tx_coalesce_usecs == 0) msk &= ~txirqmask[port]; else if (ecmd->tx_coalesce_usecs < 25 || ecmd->tx_coalesce_usecs > 33333) return -EINVAL; else { msk |= txirqmask[port]; delay = min(delay, ecmd->rx_coalesce_usecs); } skge_write32(hw, B2_IRQM_MSK, msk); if (msk == 0) skge_write32(hw, B2_IRQM_CTRL, TIM_STOP); else { skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay)); skge_write32(hw, B2_IRQM_CTRL, TIM_START); } return 0; } enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST }; static void skge_led(struct skge_port *skge, enum led_mode mode) { struct skge_hw *hw = skge->hw; int port = skge->port; spin_lock_bh(&hw->phy_lock); if (is_genesis(hw)) { switch (mode) { case LED_MODE_OFF: if (hw->phy_type == SK_PHY_BCOM) xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF); else { skge_write32(hw, SK_REG(port, TX_LED_VAL), 0); skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_T_OFF); } skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF); skge_write32(hw, SK_REG(port, RX_LED_VAL), 0); skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF); break; case LED_MODE_ON: skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON); skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON); skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START); skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START); break; case LED_MODE_TST: skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON); skge_write32(hw, SK_REG(port, RX_LED_VAL), 100); skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START); if (hw->phy_type == SK_PHY_BCOM) xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON); else { skge_write8(hw, SK_REG(port, TX_LED_TST), LED_T_ON); skge_write32(hw, SK_REG(port, TX_LED_VAL), 100); skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START); } } } else { switch (mode) { case LED_MODE_OFF: gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); gm_phy_write(hw, port, PHY_MARV_LED_OVER, PHY_M_LED_MO_DUP(MO_LED_OFF) | PHY_M_LED_MO_10(MO_LED_OFF) | PHY_M_LED_MO_100(MO_LED_OFF) | PHY_M_LED_MO_1000(MO_LED_OFF) | 
PHY_M_LED_MO_RX(MO_LED_OFF)); break; case LED_MODE_ON: gm_phy_write(hw, port, PHY_MARV_LED_CTRL, PHY_M_LED_PULS_DUR(PULS_170MS) | PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL | PHY_M_LEDC_DP_CTRL); gm_phy_write(hw, port, PHY_MARV_LED_OVER, PHY_M_LED_MO_RX(MO_LED_OFF) | (skge->speed == SPEED_100 ? PHY_M_LED_MO_100(MO_LED_ON) : 0)); break; case LED_MODE_TST: gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); gm_phy_write(hw, port, PHY_MARV_LED_OVER, PHY_M_LED_MO_DUP(MO_LED_ON) | PHY_M_LED_MO_10(MO_LED_ON) | PHY_M_LED_MO_100(MO_LED_ON) | PHY_M_LED_MO_1000(MO_LED_ON) | PHY_M_LED_MO_RX(MO_LED_ON)); } } spin_unlock_bh(&hw->phy_lock); } /* blink LED's for finding board */ static int skge_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) { struct skge_port *skge = netdev_priv(dev); switch (state) { case ETHTOOL_ID_ACTIVE: return 2; /* cycle on/off twice per second */ case ETHTOOL_ID_ON: skge_led(skge, LED_MODE_TST); break; case ETHTOOL_ID_OFF: skge_led(skge, LED_MODE_OFF); break; case ETHTOOL_ID_INACTIVE: /* back to regular LED state */ skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF); } return 0; } static int skge_get_eeprom_len(struct net_device *dev) { struct skge_port *skge = netdev_priv(dev); u32 reg2; pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2); return 1 << (((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); } static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset) { u32 val; pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset); do { pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); } while (!(offset & PCI_VPD_ADDR_F)); pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val); return val; } static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val) { pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val); pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F); do { pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); } while (offset & PCI_VPD_ADDR_F); } static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) { struct skge_port *skge = netdev_priv(dev); struct pci_dev *pdev = skge->hw->pdev; int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD); int length = eeprom->len; u16 offset = eeprom->offset; if (!cap) return -EINVAL; eeprom->magic = SKGE_EEPROM_MAGIC; while (length > 0) { u32 val = skge_vpd_read(pdev, cap, offset); int n = min_t(int, length, sizeof(val)); memcpy(data, &val, n); length -= n; data += n; offset += n; } return 0; } static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) { struct skge_port *skge = netdev_priv(dev); struct pci_dev *pdev = skge->hw->pdev; int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD); int length = eeprom->len; u16 offset = eeprom->offset; if (!cap) return -EINVAL; if (eeprom->magic != SKGE_EEPROM_MAGIC) return -EINVAL; while (length > 0) { u32 val; int n = min_t(int, length, sizeof(val)); if (n < sizeof(val)) val = skge_vpd_read(pdev, cap, offset); memcpy(&val, data, n); skge_vpd_write(pdev, cap, offset, val); length -= n; data += n; offset += n; } return 0; } static const struct ethtool_ops skge_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS, .get_drvinfo = skge_get_drvinfo, .get_regs_len = skge_get_regs_len, .get_regs = skge_get_regs, .get_wol = skge_get_wol, .set_wol = skge_set_wol, .get_msglevel = skge_get_msglevel, .set_msglevel = skge_set_msglevel, .nway_reset = skge_nway_reset, .get_link = ethtool_op_get_link, .get_eeprom_len = skge_get_eeprom_len, 
.get_eeprom = skge_get_eeprom, .set_eeprom = skge_set_eeprom, .get_ringparam = skge_get_ring_param, .set_ringparam = skge_set_ring_param, .get_pauseparam = skge_get_pauseparam, .set_pauseparam = skge_set_pauseparam, .get_coalesce = skge_get_coalesce, .set_coalesce = skge_set_coalesce, .get_strings = skge_get_strings, .set_phys_id = skge_set_phys_id, .get_sset_count = skge_get_sset_count, .get_ethtool_stats = skge_get_ethtool_stats, .get_link_ksettings = skge_get_link_ksettings, .set_link_ksettings = skge_set_link_ksettings, }; /* * Allocate ring elements and chain them together * One-to-one association of board descriptors with ring elements */ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base) { struct skge_tx_desc *d; struct skge_element *e; int i; ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL); if (!ring->start) return -ENOMEM; for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) { e->desc = d; if (i == ring->count - 1) { e->next = ring->start; d->next_offset = base; } else { e->next = e + 1; d->next_offset = base + (i+1) * sizeof(*d); } } ring->to_use = ring->to_clean = ring->start; return 0; } /* Allocate and setup a new buffer for receiving */ static int skge_rx_setup(struct skge_port *skge, struct skge_element *e, struct sk_buff *skb, unsigned int bufsize) { struct skge_rx_desc *rd = e->desc; dma_addr_t map; map = dma_map_single(&skge->hw->pdev->dev, skb->data, bufsize, DMA_FROM_DEVICE); if (dma_mapping_error(&skge->hw->pdev->dev, map)) return -1; rd->dma_lo = lower_32_bits(map); rd->dma_hi = upper_32_bits(map); e->skb = skb; rd->csum1_start = ETH_HLEN; rd->csum2_start = ETH_HLEN; rd->csum1 = 0; rd->csum2 = 0; wmb(); rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; dma_unmap_addr_set(e, mapaddr, map); dma_unmap_len_set(e, maplen, bufsize); return 0; } /* Resume receiving using existing skb, * Note: DMA address is not changed by chip. * MTU not changed while receiver active. */ static inline void skge_rx_reuse(struct skge_element *e, unsigned int size) { struct skge_rx_desc *rd = e->desc; rd->csum2 = 0; rd->csum2_start = ETH_HLEN; wmb(); rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size; } /* Free all buffers in receive ring, assumes receiver stopped */ static void skge_rx_clean(struct skge_port *skge) { struct skge_hw *hw = skge->hw; struct skge_ring *ring = &skge->rx_ring; struct skge_element *e; e = ring->start; do { struct skge_rx_desc *rd = e->desc; rd->control = 0; if (e->skb) { dma_unmap_single(&hw->pdev->dev, dma_unmap_addr(e, mapaddr), dma_unmap_len(e, maplen), DMA_FROM_DEVICE); dev_kfree_skb(e->skb); e->skb = NULL; } } while ((e = e->next) != ring->start); } /* Allocate buffers for receive ring * For receive: to_clean is next received frame. 
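 * Each ring element gets a freshly allocated skb of skge->rx_buf_size bytes
 * (with NET_IP_ALIGN bytes of headroom reserved); if an allocation or DMA
 * mapping fails, the fill is aborted and the caller unwinds the ring.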
*/ static int skge_rx_fill(struct net_device *dev) { struct skge_port *skge = netdev_priv(dev); struct skge_ring *ring = &skge->rx_ring; struct skge_element *e; e = ring->start; do { struct sk_buff *skb; skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN, GFP_KERNEL); if (!skb) return -ENOMEM; skb_reserve(skb, NET_IP_ALIGN); if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) { dev_kfree_skb(skb); return -EIO; } } while ((e = e->next) != ring->start); ring->to_clean = ring->start; return 0; } static const char *skge_pause(enum pause_status status) { switch (status) { case FLOW_STAT_NONE: return "none"; case FLOW_STAT_REM_SEND: return "rx only"; case FLOW_STAT_LOC_SEND: return "tx_only"; case FLOW_STAT_SYMMETRIC: /* Both station may send PAUSE */ return "both"; default: return "indeterminated"; } } static void skge_link_up(struct skge_port *skge) { skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_BLK_OFF|LED_SYNC_OFF|LED_REG_ON); netif_carrier_on(skge->netdev); netif_wake_queue(skge->netdev); netif_info(skge, link, skge->netdev, "Link is up at %d Mbps, %s duplex, flow control %s\n", skge->speed, skge->duplex == DUPLEX_FULL ? "full" : "half", skge_pause(skge->flow_status)); } static void skge_link_down(struct skge_port *skge) { skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_REG_OFF); netif_carrier_off(skge->netdev); netif_stop_queue(skge->netdev); netif_info(skge, link, skge->netdev, "Link is down\n"); } static void xm_link_down(struct skge_hw *hw, int port) { struct net_device *dev = hw->dev[port]; struct skge_port *skge = netdev_priv(dev); xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); if (netif_carrier_ok(dev)) skge_link_down(skge); } static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) { int i; xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); *val = xm_read16(hw, port, XM_PHY_DATA); if (hw->phy_type == SK_PHY_XMAC) goto ready; for (i = 0; i < PHY_RETRIES; i++) { if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY) goto ready; udelay(1); } return -ETIMEDOUT; ready: *val = xm_read16(hw, port, XM_PHY_DATA); return 0; } static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg) { u16 v = 0; if (__xm_phy_read(hw, port, reg, &v)) pr_warn("%s: phy read timed out\n", hw->dev[port]->name); return v; } static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) { int i; xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); for (i = 0; i < PHY_RETRIES; i++) { if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) goto ready; udelay(1); } return -EIO; ready: xm_write16(hw, port, XM_PHY_DATA, val); for (i = 0; i < PHY_RETRIES; i++) { if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) return 0; udelay(1); } return -ETIMEDOUT; } static void genesis_init(struct skge_hw *hw) { /* set blink source counter */ skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100); skge_write8(hw, B2_BSC_CTRL, BSC_START); /* configure mac arbiter */ skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR); /* configure mac arbiter timeout values */ skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53); skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53); skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53); skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53); skge_write8(hw, B3_MA_RCINI_RX1, 0); skge_write8(hw, B3_MA_RCINI_RX2, 0); skge_write8(hw, B3_MA_RCINI_TX1, 0); skge_write8(hw, B3_MA_RCINI_TX2, 0); /* configure packet arbiter timeout */ skge_write16(hw, B3_PA_CTRL, PA_RST_CLR); skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX); skge_write16(hw, 
B3_PA_TOINI_TX1, SK_PKT_TO_MAX); skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX); skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX); } static void genesis_reset(struct skge_hw *hw, int port) { static const u8 zero[8] = { 0 }; u32 reg; skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); /* reset the statistics module */ xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT); xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */ xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */ xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */ /* disable Broadcom PHY IRQ */ if (hw->phy_type == SK_PHY_BCOM) xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff); xm_outhash(hw, port, XM_HSM, zero); /* Flush TX and RX fifo */ reg = xm_read32(hw, port, XM_MODE); xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF); xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF); } /* Convert mode to MII values */ static const u16 phy_pause_map[] = { [FLOW_MODE_NONE] = 0, [FLOW_MODE_LOC_SEND] = PHY_AN_PAUSE_ASYM, [FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP, [FLOW_MODE_SYM_OR_REM] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM, }; /* special defines for FIBER (88E1011S only) */ static const u16 fiber_pause_map[] = { [FLOW_MODE_NONE] = PHY_X_P_NO_PAUSE, [FLOW_MODE_LOC_SEND] = PHY_X_P_ASYM_MD, [FLOW_MODE_SYMMETRIC] = PHY_X_P_SYM_MD, [FLOW_MODE_SYM_OR_REM] = PHY_X_P_BOTH_MD, }; /* Check status of Broadcom phy link */ static void bcom_check_link(struct skge_hw *hw, int port) { struct net_device *dev = hw->dev[port]; struct skge_port *skge = netdev_priv(dev); u16 status; /* read twice because of latch */ xm_phy_read(hw, port, PHY_BCOM_STAT); status = xm_phy_read(hw, port, PHY_BCOM_STAT); if ((status & PHY_ST_LSYNC) == 0) { xm_link_down(hw, port); return; } if (skge->autoneg == AUTONEG_ENABLE) { u16 lpa, aux; if (!(status & PHY_ST_AN_OVER)) return; lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); if (lpa & PHY_B_AN_RF) { netdev_notice(dev, "remote fault\n"); return; } aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT); /* Check Duplex mismatch */ switch (aux & PHY_B_AS_AN_RES_MSK) { case PHY_B_RES_1000FD: skge->duplex = DUPLEX_FULL; break; case PHY_B_RES_1000HD: skge->duplex = DUPLEX_HALF; break; default: netdev_notice(dev, "duplex mismatch\n"); return; } /* We are using IEEE 802.3z/D5.0 Table 37-4 */ switch (aux & PHY_B_AS_PAUSE_MSK) { case PHY_B_AS_PAUSE_MSK: skge->flow_status = FLOW_STAT_SYMMETRIC; break; case PHY_B_AS_PRR: skge->flow_status = FLOW_STAT_REM_SEND; break; case PHY_B_AS_PRT: skge->flow_status = FLOW_STAT_LOC_SEND; break; default: skge->flow_status = FLOW_STAT_NONE; } skge->speed = SPEED_1000; } if (!netif_carrier_ok(dev)) genesis_link_up(skge); } /* Broadcom 5400 only supports giagabit! SysKonnect did not put an additional * Phy on for 100 or 10Mbit operation */ static void bcom_phy_init(struct skge_port *skge) { struct skge_hw *hw = skge->hw; int port = skge->port; int i; u16 id1, r, ext, ctl; /* magic workaround patterns for Broadcom */ static const struct { u16 reg; u16 val; } A1hack[] = { { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, }, C0hack[] = { { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 }, { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 }, }; /* read Id from external PHY (all have the same address) */ id1 = xm_phy_read(hw, port, PHY_XMAC_ID1); /* Optimize MDIO transfer by suppressing preamble. 
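 * (The preamble suppression is done just below by setting XM_MMU_NO_PRE in
 * the XMAC MMU command register.)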
*/ r = xm_read16(hw, port, XM_MMU_CMD); r |= XM_MMU_NO_PRE; xm_write16(hw, port, XM_MMU_CMD, r); switch (id1) { case PHY_BCOM_ID1_C0: /* * Workaround BCOM Errata for the C0 type. * Write magic patterns to reserved registers. */ for (i = 0; i < ARRAY_SIZE(C0hack); i++) xm_phy_write(hw, port, C0hack[i].reg, C0hack[i].val); break; case PHY_BCOM_ID1_A1: /* * Workaround BCOM Errata for the A1 type. * Write magic patterns to reserved registers. */ for (i = 0; i < ARRAY_SIZE(A1hack); i++) xm_phy_write(hw, port, A1hack[i].reg, A1hack[i].val); break; } /* * Workaround BCOM Errata (#10523) for all BCom PHYs. * Disable Power Management after reset. */ r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL); r |= PHY_B_AC_DIS_PM; xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r); /* Dummy read */ xm_read16(hw, port, XM_ISRC); ext = PHY_B_PEC_EN_LTR; /* enable tx led */ ctl = PHY_CT_SP1000; /* always 1000mbit */ if (skge->autoneg == AUTONEG_ENABLE) { /* * Workaround BCOM Errata #1 for the C5 type. * 1000Base-T Link Acquisition Failure in Slave Mode * Set Repeater/DTE bit 10 of the 1000Base-T Control Register */ u16 adv = PHY_B_1000C_RD; if (skge->advertising & ADVERTISED_1000baseT_Half) adv |= PHY_B_1000C_AHD; if (skge->advertising & ADVERTISED_1000baseT_Full) adv |= PHY_B_1000C_AFD; xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv); ctl |= PHY_CT_ANE | PHY_CT_RE_CFG; } else { if (skge->duplex == DUPLEX_FULL) ctl |= PHY_CT_DUP_MD; /* Force to slave */ xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE); } /* Set autonegotiation pause parameters */ xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV, phy_pause_map[skge->flow_control] | PHY_AN_CSMA); /* Handle Jumbo frames */ if (hw->dev[port]->mtu > ETH_DATA_LEN) { xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK); ext |= PHY_B_PEC_HIGH_LA; } xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext); xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl); /* Use link status change interrupt */ xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); } static void xm_phy_init(struct skge_port *skge) { struct skge_hw *hw = skge->hw; int port = skge->port; u16 ctrl = 0; if (skge->autoneg == AUTONEG_ENABLE) { if (skge->advertising & ADVERTISED_1000baseT_Half) ctrl |= PHY_X_AN_HD; if (skge->advertising & ADVERTISED_1000baseT_Full) ctrl |= PHY_X_AN_FD; ctrl |= fiber_pause_map[skge->flow_control]; xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl); /* Restart Auto-negotiation */ ctrl = PHY_CT_ANE | PHY_CT_RE_CFG; } else { /* Set DuplexMode in Config register */ if (skge->duplex == DUPLEX_FULL) ctrl |= PHY_CT_DUP_MD; /* * Do NOT enable Auto-negotiation here. 
This would hold * the link down because no IDLEs are transmitted */ } xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl); /* Poll PHY for status changes */ mod_timer(&skge->link_timer, jiffies + LINK_HZ); } static int xm_check_link(struct net_device *dev) { struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; int port = skge->port; u16 status; /* read twice because of latch */ xm_phy_read(hw, port, PHY_XMAC_STAT); status = xm_phy_read(hw, port, PHY_XMAC_STAT); if ((status & PHY_ST_LSYNC) == 0) { xm_link_down(hw, port); return 0; } if (skge->autoneg == AUTONEG_ENABLE) { u16 lpa, res; if (!(status & PHY_ST_AN_OVER)) return 0; lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); if (lpa & PHY_B_AN_RF) { netdev_notice(dev, "remote fault\n"); return 0; } res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI); /* Check Duplex mismatch */ switch (res & (PHY_X_RS_HD | PHY_X_RS_FD)) { case PHY_X_RS_FD: skge->duplex = DUPLEX_FULL; break; case PHY_X_RS_HD: skge->duplex = DUPLEX_HALF; break; default: netdev_notice(dev, "duplex mismatch\n"); return 0; } /* We are using IEEE 802.3z/D5.0 Table 37-4 */ if ((skge->flow_control == FLOW_MODE_SYMMETRIC || skge->flow_control == FLOW_MODE_SYM_OR_REM) && (lpa & PHY_X_P_SYM_MD)) skge->flow_status = FLOW_STAT_SYMMETRIC; else if (skge->flow_control == FLOW_MODE_SYM_OR_REM && (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD) /* Enable PAUSE receive, disable PAUSE transmit */ skge->flow_status = FLOW_STAT_REM_SEND; else if (skge->flow_control == FLOW_MODE_LOC_SEND && (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD) /* Disable PAUSE receive, enable PAUSE transmit */ skge->flow_status = FLOW_STAT_LOC_SEND; else skge->flow_status = FLOW_STAT_NONE; skge->speed = SPEED_1000; } if (!netif_carrier_ok(dev)) genesis_link_up(skge); return 1; } /* Poll to check for link coming up. * * Since internal PHY is wired to a level triggered pin, can't * get an interrupt when carrier is detected, need to poll for * link coming up. */ static void xm_link_timer(struct timer_list *t) { struct skge_port *skge = from_timer(skge, t, link_timer); struct net_device *dev = skge->netdev; struct skge_hw *hw = skge->hw; int port = skge->port; int i; unsigned long flags; if (!netif_running(dev)) return; spin_lock_irqsave(&hw->phy_lock, flags); /* * Verify that the link by checking GPIO register three times. * This pin has the signal from the link_sync pin connected to it. */ for (i = 0; i < 3; i++) { if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS) goto link_down; } /* Re-enable interrupt to detect link down */ if (xm_check_link(dev)) { u16 msk = xm_read16(hw, port, XM_IMSK); msk &= ~XM_IS_INP_ASS; xm_write16(hw, port, XM_IMSK, msk); xm_read16(hw, port, XM_ISRC); } else { link_down: mod_timer(&skge->link_timer, round_jiffies(jiffies + LINK_HZ)); } spin_unlock_irqrestore(&hw->phy_lock, flags); } static void genesis_mac_init(struct skge_hw *hw, int port) { struct net_device *dev = hw->dev[port]; struct skge_port *skge = netdev_priv(dev); int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN; int i; u32 r; static const u8 zero[6] = { 0 }; for (i = 0; i < 10; i++) { skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST); if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST) goto reset_ok; udelay(1); } netdev_warn(dev, "genesis reset failed\n"); reset_ok: /* Unreset the XMAC. */ skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); /* * Perform additional initialization for external PHYs, * namely for the 1000baseTX cards that use the XMAC's * GMII mode. 
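 *
 * For those boards the external PHY is first released from reset via the
 * B2_GP_IO pins and the XMAC is switched to GMII mode before the
 * PHY-specific initialization below runs.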
*/ if (hw->phy_type != SK_PHY_XMAC) { /* Take external Phy out of reset */ r = skge_read32(hw, B2_GP_IO); if (port == 0) r |= GP_DIR_0|GP_IO_0; else r |= GP_DIR_2|GP_IO_2; skge_write32(hw, B2_GP_IO, r); /* Enable GMII interface */ xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD); } switch (hw->phy_type) { case SK_PHY_XMAC: xm_phy_init(skge); break; case SK_PHY_BCOM: bcom_phy_init(skge); bcom_check_link(hw, port); } /* Set Station Address */ xm_outaddr(hw, port, XM_SA, dev->dev_addr); /* We don't use match addresses so clear */ for (i = 1; i < 16; i++) xm_outaddr(hw, port, XM_EXM(i), zero); /* Clear MIB counters */ xm_write16(hw, port, XM_STAT_CMD, XM_SC_CLR_RXC | XM_SC_CLR_TXC); /* Clear two times according to Errata #3 */ xm_write16(hw, port, XM_STAT_CMD, XM_SC_CLR_RXC | XM_SC_CLR_TXC); /* configure Rx High Water Mark (XM_RX_HI_WM) */ xm_write16(hw, port, XM_RX_HI_WM, 1450); /* We don't need the FCS appended to the packet. */ r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS; if (jumbo) r |= XM_RX_BIG_PK_OK; if (skge->duplex == DUPLEX_HALF) { /* * If in manual half duplex mode the other side might be in * full duplex mode, so ignore if a carrier extension is not seen * on frames received */ r |= XM_RX_DIS_CEXT; } xm_write16(hw, port, XM_RX_CMD, r); /* We want short frames padded to 60 bytes. */ xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD); /* Increase threshold for jumbo frames on dual port */ if (hw->ports > 1 && jumbo) xm_write16(hw, port, XM_TX_THR, 1020); else xm_write16(hw, port, XM_TX_THR, 512); /* * Enable the reception of all error frames. This is * a necessary evil due to the design of the XMAC. The * XMAC's receive FIFO is only 8K in size, however jumbo * frames can be up to 9000 bytes in length. When bad * frame filtering is enabled, the XMAC's RX FIFO operates * in 'store and forward' mode. For this to work, the * entire frame has to fit into the FIFO, but that means * that jumbo frames larger than 8192 bytes will be * truncated. Disabling all bad frame filtering causes * the RX FIFO to operate in streaming mode, in which * case the XMAC will start transferring frames out of the * RX FIFO as soon as the FIFO threshold is reached. */ xm_write32(hw, port, XM_MODE, XM_DEF_MODE); /* * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK) * - Enable all bits excepting 'Octets Rx OK Low CntOv' * and 'Octets Rx OK Hi Cnt Ov'. */ xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK); /* * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK) * - Enable all bits excepting 'Octets Tx OK Low CntOv' * and 'Octets Tx OK Hi Cnt Ov'. 
*/ xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK); /* Configure MAC arbiter */ skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR); /* configure timeout values */ skge_write8(hw, B3_MA_TOINI_RX1, 72); skge_write8(hw, B3_MA_TOINI_RX2, 72); skge_write8(hw, B3_MA_TOINI_TX1, 72); skge_write8(hw, B3_MA_TOINI_TX2, 72); skge_write8(hw, B3_MA_RCINI_RX1, 0); skge_write8(hw, B3_MA_RCINI_RX2, 0); skge_write8(hw, B3_MA_RCINI_TX1, 0); skge_write8(hw, B3_MA_RCINI_TX2, 0); /* Configure Rx MAC FIFO */ skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR); skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT); skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD); /* Configure Tx MAC FIFO */ skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR); skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF); skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD); if (jumbo) { /* Enable frame flushing if jumbo frames used */ skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH); } else { /* enable timeout timers if normal frames */ skge_write16(hw, B3_PA_CTRL, (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2); } } static void genesis_stop(struct skge_port *skge) { struct skge_hw *hw = skge->hw; int port = skge->port; unsigned retries = 1000; u16 cmd; /* Disable Tx and Rx */ cmd = xm_read16(hw, port, XM_MMU_CMD); cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); xm_write16(hw, port, XM_MMU_CMD, cmd); genesis_reset(hw, port); /* Clear Tx packet arbiter timeout IRQ */ skge_write16(hw, B3_PA_CTRL, port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2); /* Reset the MAC */ skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); do { skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST); if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)) break; } while (--retries > 0); /* For external PHYs there must be special handling */ if (hw->phy_type != SK_PHY_XMAC) { u32 reg = skge_read32(hw, B2_GP_IO); if (port == 0) { reg |= GP_DIR_0; reg &= ~GP_IO_0; } else { reg |= GP_DIR_2; reg &= ~GP_IO_2; } skge_write32(hw, B2_GP_IO, reg); skge_read32(hw, B2_GP_IO); } xm_write16(hw, port, XM_MMU_CMD, xm_read16(hw, port, XM_MMU_CMD) & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX)); xm_read16(hw, port, XM_MMU_CMD); } static void genesis_get_stats(struct skge_port *skge, u64 *data) { struct skge_hw *hw = skge->hw; int port = skge->port; int i; unsigned long timeout = jiffies + HZ; xm_write16(hw, port, XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC); /* wait for update to complete */ while (xm_read16(hw, port, XM_STAT_CMD) & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) { if (time_after(jiffies, timeout)) break; udelay(10); } /* special case for 64 bit octet counter */ data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32 | xm_read32(hw, port, XM_TXO_OK_LO); data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32 | xm_read32(hw, port, XM_RXO_OK_LO); for (i = 2; i < ARRAY_SIZE(skge_stats); i++) data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset); } static void genesis_mac_intr(struct skge_hw *hw, int port) { struct net_device *dev = hw->dev[port]; struct skge_port *skge = netdev_priv(dev); u16 status = xm_read16(hw, port, XM_ISRC); netif_printk(skge, intr, KERN_DEBUG, skge->netdev, "mac interrupt status 0x%x\n", status); if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) { xm_link_down(hw, port); mod_timer(&skge->link_timer, jiffies + 1); } if (status & XM_IS_TXF_UR) { xm_write32(hw, port, XM_MODE, XM_MD_FTF); ++dev->stats.tx_fifo_errors; } } static void genesis_link_up(struct skge_port *skge) { struct skge_hw *hw = 
skge->hw; int port = skge->port; u16 cmd, msk; u32 mode; cmd = xm_read16(hw, port, XM_MMU_CMD); /* * enabling pause frame reception is required for 1000BT * because the XMAC is not reset if the link is going down */ if (skge->flow_status == FLOW_STAT_NONE || skge->flow_status == FLOW_STAT_LOC_SEND) /* Disable Pause Frame Reception */ cmd |= XM_MMU_IGN_PF; else /* Enable Pause Frame Reception */ cmd &= ~XM_MMU_IGN_PF; xm_write16(hw, port, XM_MMU_CMD, cmd); mode = xm_read32(hw, port, XM_MODE); if (skge->flow_status == FLOW_STAT_SYMMETRIC || skge->flow_status == FLOW_STAT_LOC_SEND) { /* * Configure Pause Frame Generation * Use internal and external Pause Frame Generation. * Sending pause frames is edge triggered. * Send a Pause frame with the maximum pause time if an * internal or external FIFO full condition occurs. * Send a zero pause time frame to re-start transmission. */ /* XM_PAUSE_DA = '010000C28001' (default) */ /* XM_MAC_PTIME = 0xffff (maximum) */ /* remember this value is defined in big endian (!) */ xm_write16(hw, port, XM_MAC_PTIME, 0xffff); mode |= XM_PAUSE_MODE; skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE); } else { /* * disabling pause frame generation is required for 1000BT * because the XMAC is not reset if the link is going down */ /* Disable Pause Mode in Mode Register */ mode &= ~XM_PAUSE_MODE; skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE); } xm_write32(hw, port, XM_MODE, mode); /* Turn on detection of Tx underrun */ msk = xm_read16(hw, port, XM_IMSK); msk &= ~XM_IS_TXF_UR; xm_write16(hw, port, XM_IMSK, msk); xm_read16(hw, port, XM_ISRC); /* get MMU Command Reg. */ cmd = xm_read16(hw, port, XM_MMU_CMD); if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL) cmd |= XM_MMU_GMII_FD; /* * Workaround BCOM Errata (#10523) for all BCom Phys * Enable Power Management after link up */ if (hw->phy_type == SK_PHY_BCOM) { xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL) & ~PHY_B_AC_DIS_PM); xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); } /* enable Rx/Tx */ xm_write16(hw, port, XM_MMU_CMD, cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX); skge_link_up(skge); } static inline void bcom_phy_intr(struct skge_port *skge) { struct skge_hw *hw = skge->hw; int port = skge->port; u16 isrc; isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT); netif_printk(skge, intr, KERN_DEBUG, skge->netdev, "phy interrupt status 0x%x\n", isrc); if (isrc & PHY_B_IS_PSE) pr_err("%s: uncorrectable pair swap error\n", hw->dev[port]->name); /* Workaround BCom Errata: * enable and disable loopback mode if "NO HCD" occurs. 
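 * (The toggle below briefly sets and then clears PHY_CT_LOOP in the
 * Broadcom PHY control register.)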
*/ if (isrc & PHY_B_IS_NO_HDCL) { u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL); xm_phy_write(hw, port, PHY_BCOM_CTRL, ctrl | PHY_CT_LOOP); xm_phy_write(hw, port, PHY_BCOM_CTRL, ctrl & ~PHY_CT_LOOP); } if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) bcom_check_link(hw, port); } static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) { int i; gma_write16(hw, port, GM_SMI_DATA, val); gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg)); for (i = 0; i < PHY_RETRIES; i++) { udelay(1); if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY)) return 0; } pr_warn("%s: phy write timeout\n", hw->dev[port]->name); return -EIO; } static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) { int i; gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); for (i = 0; i < PHY_RETRIES; i++) { udelay(1); if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) goto ready; } return -ETIMEDOUT; ready: *val = gma_read16(hw, port, GM_SMI_DATA); return 0; } static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg) { u16 v = 0; if (__gm_phy_read(hw, port, reg, &v)) pr_warn("%s: phy read timeout\n", hw->dev[port]->name); return v; } /* Marvell Phy Initialization */ static void yukon_init(struct skge_hw *hw, int port) { struct skge_port *skge = netdev_priv(hw->dev[port]); u16 ctrl, ct1000, adv; if (skge->autoneg == AUTONEG_ENABLE) { u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | PHY_M_EC_MAC_S_MSK); ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1); gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl); } ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); if (skge->autoneg == AUTONEG_DISABLE) ctrl &= ~PHY_CT_ANE; ctrl |= PHY_CT_RESET; gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); ctrl = 0; ct1000 = 0; adv = PHY_AN_CSMA; if (skge->autoneg == AUTONEG_ENABLE) { if (hw->copper) { if (skge->advertising & ADVERTISED_1000baseT_Full) ct1000 |= PHY_M_1000C_AFD; if (skge->advertising & ADVERTISED_1000baseT_Half) ct1000 |= PHY_M_1000C_AHD; if (skge->advertising & ADVERTISED_100baseT_Full) adv |= PHY_M_AN_100_FD; if (skge->advertising & ADVERTISED_100baseT_Half) adv |= PHY_M_AN_100_HD; if (skge->advertising & ADVERTISED_10baseT_Full) adv |= PHY_M_AN_10_FD; if (skge->advertising & ADVERTISED_10baseT_Half) adv |= PHY_M_AN_10_HD; /* Set Flow-control capabilities */ adv |= phy_pause_map[skge->flow_control]; } else { if (skge->advertising & ADVERTISED_1000baseT_Full) adv |= PHY_M_AN_1000X_AFD; if (skge->advertising & ADVERTISED_1000baseT_Half) adv |= PHY_M_AN_1000X_AHD; adv |= fiber_pause_map[skge->flow_control]; } /* Restart Auto-negotiation */ ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG; } else { /* forced speed/duplex settings */ ct1000 = PHY_M_1000C_MSE; if (skge->duplex == DUPLEX_FULL) ctrl |= PHY_CT_DUP_MD; switch (skge->speed) { case SPEED_1000: ctrl |= PHY_CT_SP1000; break; case SPEED_100: ctrl |= PHY_CT_SP100; break; } ctrl |= PHY_CT_RESET; } gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000); gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); /* Enable phy interrupt on autonegotiation complete (or link up) */ if (skge->autoneg == AUTONEG_ENABLE) gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK); else gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK); } static void yukon_reset(struct skge_hw *hw, int port) { gm_phy_write(hw, port, PHY_MARV_INT_MASK, 
0);/* disable PHY IRQs */ gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */ gma_write16(hw, port, GM_MC_ADDR_H2, 0); gma_write16(hw, port, GM_MC_ADDR_H3, 0); gma_write16(hw, port, GM_MC_ADDR_H4, 0); gma_write16(hw, port, GM_RX_CTRL, gma_read16(hw, port, GM_RX_CTRL) | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); } /* Apparently, early versions of Yukon-Lite had wrong chip_id? */ static int is_yukon_lite_a0(struct skge_hw *hw) { u32 reg; int ret; if (hw->chip_id != CHIP_ID_YUKON) return 0; reg = skge_read32(hw, B2_FAR); skge_write8(hw, B2_FAR + 3, 0xff); ret = (skge_read8(hw, B2_FAR + 3) != 0); skge_write32(hw, B2_FAR, reg); return ret; } static void yukon_mac_init(struct skge_hw *hw, int port) { struct skge_port *skge = netdev_priv(hw->dev[port]); int i; u32 reg; const u8 *addr = hw->dev[port]->dev_addr; /* WA code for COMA mode -- set PHY reset */ if (hw->chip_id == CHIP_ID_YUKON_LITE && hw->chip_rev >= CHIP_REV_YU_LITE_A3) { reg = skge_read32(hw, B2_GP_IO); reg |= GP_DIR_9 | GP_IO_9; skge_write32(hw, B2_GP_IO, reg); } /* hard reset */ skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); /* WA code for COMA mode -- clear PHY reset */ if (hw->chip_id == CHIP_ID_YUKON_LITE && hw->chip_rev >= CHIP_REV_YU_LITE_A3) { reg = skge_read32(hw, B2_GP_IO); reg |= GP_DIR_9; reg &= ~GP_IO_9; skge_write32(hw, B2_GP_IO, reg); } /* Set hardware config mode */ reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP | GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE; reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB; /* Clear GMC reset */ skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET); skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR); skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); if (skge->autoneg == AUTONEG_DISABLE) { reg = GM_GPCR_AU_ALL_DIS; gma_write16(hw, port, GM_GP_CTRL, gma_read16(hw, port, GM_GP_CTRL) | reg); switch (skge->speed) { case SPEED_1000: reg &= ~GM_GPCR_SPEED_100; reg |= GM_GPCR_SPEED_1000; break; case SPEED_100: reg &= ~GM_GPCR_SPEED_1000; reg |= GM_GPCR_SPEED_100; break; case SPEED_10: reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100); break; } if (skge->duplex == DUPLEX_FULL) reg |= GM_GPCR_DUP_FULL; } else reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; switch (skge->flow_control) { case FLOW_MODE_NONE: skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; break; case FLOW_MODE_LOC_SEND: /* disable Rx flow-control */ reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; break; case FLOW_MODE_SYMMETRIC: case FLOW_MODE_SYM_OR_REM: /* enable Tx & Rx flow-control */ break; } gma_write16(hw, port, GM_GP_CTRL, reg); skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC)); yukon_init(hw, port); /* MIB clear */ reg = gma_read16(hw, port, GM_PHY_ADDR); gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR); for (i = 0; i < GM_MIB_CNT_SIZE; i++) gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i); gma_write16(hw, port, GM_PHY_ADDR, reg); /* transmit control */ gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); /* receive control reg: unicast + multicast + no FCS */ gma_write16(hw, port, GM_RX_CTRL, GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA); /* transmit flow control */ gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff); /* transmit parameter */ gma_write16(hw, port, GM_TX_PARAM, TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | TX_IPG_JAM_DATA(TX_IPG_JAM_DEF)); /* configure the Serial Mode 
Register */ reg = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); if (hw->dev[port]->mtu > ETH_DATA_LEN) reg |= GM_SMOD_JUMBO_ENA; gma_write16(hw, port, GM_SERIAL_MODE, reg); /* physical address: used for pause frames */ gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr); /* virtual address for data */ gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr); /* enable interrupt mask for counter overflows */ gma_write16(hw, port, GM_TX_IRQ_MSK, 0); gma_write16(hw, port, GM_RX_IRQ_MSK, 0); gma_write16(hw, port, GM_TR_IRQ_MSK, 0); /* Initialize Mac Fifo */ /* Configure Rx MAC FIFO */ skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK); reg = GMF_OPER_ON | GMF_RX_F_FL_ON; /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */ if (is_yukon_lite_a0(hw)) reg &= ~GMF_RX_F_FL_ON; skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg); /* * because Pause Packet Truncation in GMAC is not working * we have to increase the Flush Threshold to 64 bytes * in order to flush pause packets in Rx FIFO on Yukon-1 */ skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1); /* Configure Tx MAC FIFO */ skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); } /* Go into power down mode */ static void yukon_suspend(struct skge_hw *hw, int port) { u16 ctrl; ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); ctrl |= PHY_M_PC_POL_R_DIS; gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); ctrl |= PHY_CT_RESET; gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); /* switch IEEE compatible power down mode on */ ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); ctrl |= PHY_CT_PDOWN; gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); } static void yukon_stop(struct skge_port *skge) { struct skge_hw *hw = skge->hw; int port = skge->port; skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); yukon_reset(hw, port); gma_write16(hw, port, GM_GP_CTRL, gma_read16(hw, port, GM_GP_CTRL) & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA)); gma_read16(hw, port, GM_GP_CTRL); yukon_suspend(hw, port); /* set GPHY Control reset */ skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); } static void yukon_get_stats(struct skge_port *skge, u64 *data) { struct skge_hw *hw = skge->hw; int port = skge->port; int i; data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32 | gma_read32(hw, port, GM_TXO_OK_LO); data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32 | gma_read32(hw, port, GM_RXO_OK_LO); for (i = 2; i < ARRAY_SIZE(skge_stats); i++) data[i] = gma_read32(hw, port, skge_stats[i].gma_offset); } static void yukon_mac_intr(struct skge_hw *hw, int port) { struct net_device *dev = hw->dev[port]; struct skge_port *skge = netdev_priv(dev); u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC)); netif_printk(skge, intr, KERN_DEBUG, skge->netdev, "mac interrupt status 0x%x\n", status); if (status & GM_IS_RX_FF_OR) { ++dev->stats.rx_fifo_errors; skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); } if (status & GM_IS_TX_FF_UR) { ++dev->stats.tx_fifo_errors; skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); } } static u16 yukon_speed(const struct skge_hw *hw, u16 aux) { switch (aux & PHY_M_PS_SPEED_MSK) { case PHY_M_PS_SPEED_1000: return SPEED_1000; case PHY_M_PS_SPEED_100: return SPEED_100; default: return SPEED_10; } } static void yukon_link_up(struct skge_port *skge) { struct skge_hw *hw = skge->hw; int 
port = skge->port; u16 reg; /* Enable Transmit FIFO Underrun */ skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); reg = gma_read16(hw, port, GM_GP_CTRL); if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE) reg |= GM_GPCR_DUP_FULL; /* enable Rx/Tx */ reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; gma_write16(hw, port, GM_GP_CTRL, reg); gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK); skge_link_up(skge); } static void yukon_link_down(struct skge_port *skge) { struct skge_hw *hw = skge->hw; int port = skge->port; u16 ctrl; ctrl = gma_read16(hw, port, GM_GP_CTRL); ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); gma_write16(hw, port, GM_GP_CTRL, ctrl); if (skge->flow_status == FLOW_STAT_REM_SEND) { ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV); ctrl |= PHY_M_AN_ASP; /* restore Asymmetric Pause bit */ gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl); } skge_link_down(skge); yukon_init(hw, port); } static void yukon_phy_intr(struct skge_port *skge) { struct skge_hw *hw = skge->hw; int port = skge->port; const char *reason = NULL; u16 istatus, phystat; istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT); phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT); netif_printk(skge, intr, KERN_DEBUG, skge->netdev, "phy interrupt status 0x%x 0x%x\n", istatus, phystat); if (istatus & PHY_M_IS_AN_COMPL) { if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP) & PHY_M_AN_RF) { reason = "remote fault"; goto failed; } if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) { reason = "master/slave fault"; goto failed; } if (!(phystat & PHY_M_PS_SPDUP_RES)) { reason = "speed/duplex"; goto failed; } skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; skge->speed = yukon_speed(hw, phystat); /* We are using IEEE 802.3z/D5.0 Table 37-4 */ switch (phystat & PHY_M_PS_PAUSE_MSK) { case PHY_M_PS_PAUSE_MSK: skge->flow_status = FLOW_STAT_SYMMETRIC; break; case PHY_M_PS_RX_P_EN: skge->flow_status = FLOW_STAT_REM_SEND; break; case PHY_M_PS_TX_P_EN: skge->flow_status = FLOW_STAT_LOC_SEND; break; default: skge->flow_status = FLOW_STAT_NONE; } if (skge->flow_status == FLOW_STAT_NONE || (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF)) skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); else skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON); yukon_link_up(skge); return; } if (istatus & PHY_M_IS_LSP_CHANGE) skge->speed = yukon_speed(hw, phystat); if (istatus & PHY_M_IS_DUP_CHANGE) skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; if (istatus & PHY_M_IS_LST_CHANGE) { if (phystat & PHY_M_PS_LINK_UP) yukon_link_up(skge); else yukon_link_down(skge); } return; failed: pr_err("%s: autonegotiation failed (%s)\n", skge->netdev->name, reason); /* XXX restart autonegotiation? 
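 * For now the failure is only reported and the PHY is left in its
 * current state.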
*/ } static void skge_phy_reset(struct skge_port *skge) { struct skge_hw *hw = skge->hw; int port = skge->port; struct net_device *dev = hw->dev[port]; netif_stop_queue(skge->netdev); netif_carrier_off(skge->netdev); spin_lock_bh(&hw->phy_lock); if (is_genesis(hw)) { genesis_reset(hw, port); genesis_mac_init(hw, port); } else { yukon_reset(hw, port); yukon_init(hw, port); } spin_unlock_bh(&hw->phy_lock); skge_set_multicast(dev); } /* Basic MII support */ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mii_ioctl_data *data = if_mii(ifr); struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; int err = -EOPNOTSUPP; if (!netif_running(dev)) return -ENODEV; /* Phy still in reset */ switch (cmd) { case SIOCGMIIPHY: data->phy_id = hw->phy_addr; fallthrough; case SIOCGMIIREG: { u16 val = 0; spin_lock_bh(&hw->phy_lock); if (is_genesis(hw)) err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); else err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); spin_unlock_bh(&hw->phy_lock); data->val_out = val; break; } case SIOCSMIIREG: spin_lock_bh(&hw->phy_lock); if (is_genesis(hw)) err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f, data->val_in); else err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f, data->val_in); spin_unlock_bh(&hw->phy_lock); break; } return err; } static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len) { u32 end; start /= 8; len /= 8; end = start + len - 1; skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); skge_write32(hw, RB_ADDR(q, RB_START), start); skge_write32(hw, RB_ADDR(q, RB_WP), start); skge_write32(hw, RB_ADDR(q, RB_RP), start); skge_write32(hw, RB_ADDR(q, RB_END), end); if (q == Q_R1 || q == Q_R2) { /* Set thresholds on receive queue's */ skge_write32(hw, RB_ADDR(q, RB_RX_UTPP), start + (2*len)/3); skge_write32(hw, RB_ADDR(q, RB_RX_LTPP), start + (len/3)); } else { /* Enable store & forward on Tx queue's because * Tx FIFO is only 4K on Genesis and 1K on Yukon */ skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD); } skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD); } /* Setup Bus Memory Interface */ static void skge_qset(struct skge_port *skge, u16 q, const struct skge_element *e) { struct skge_hw *hw = skge->hw; u32 watermark = 0x600; u64 base = skge->dma + (e->desc - skge->mem); /* optimization to reduce window on 32bit/33mhz */ if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0) watermark /= 2; skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET); skge_write32(hw, Q_ADDR(q, Q_F), watermark); skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32)); skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base); } static int skge_up(struct net_device *dev) { struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; int port = skge->port; u32 chunk, ram_addr; size_t rx_size, tx_size; int err; if (!is_valid_ether_addr(dev->dev_addr)) return -EINVAL; netif_info(skge, ifup, skge->netdev, "enabling interface\n"); if (dev->mtu > RX_BUF_SIZE) skge->rx_buf_size = dev->mtu + ETH_HLEN; else skge->rx_buf_size = RX_BUF_SIZE; rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc); tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc); skge->mem_size = tx_size + rx_size; skge->mem = dma_alloc_coherent(&hw->pdev->dev, skge->mem_size, &skge->dma, GFP_KERNEL); if (!skge->mem) return -ENOMEM; BUG_ON(skge->dma & 7); if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) { dev_err(&hw->pdev->dev, "dma_alloc_coherent region crosses 4G 
boundary\n"); err = -EINVAL; goto free_pci_mem; } err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma); if (err) goto free_pci_mem; err = skge_rx_fill(dev); if (err) goto free_rx_ring; err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size, skge->dma + rx_size); if (err) goto free_rx_ring; if (hw->ports == 1) { err = request_irq(hw->pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw); if (err) { netdev_err(dev, "Unable to allocate interrupt %d error: %d\n", hw->pdev->irq, err); goto free_tx_ring; } } /* Initialize MAC */ netif_carrier_off(dev); spin_lock_bh(&hw->phy_lock); if (is_genesis(hw)) genesis_mac_init(hw, port); else yukon_mac_init(hw, port); spin_unlock_bh(&hw->phy_lock); /* Configure RAMbuffers - equally between ports and tx/rx */ chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2); ram_addr = hw->ram_offset + 2 * chunk * port; skge_ramset(hw, rxqaddr[port], ram_addr, chunk); skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean); BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean); skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk); skge_qset(skge, txqaddr[port], skge->tx_ring.to_use); /* Start receiver BMU */ wmb(); skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); skge_led(skge, LED_MODE_ON); spin_lock_irq(&hw->hw_lock); hw->intr_mask |= portmask[port]; skge_write32(hw, B0_IMSK, hw->intr_mask); skge_read32(hw, B0_IMSK); spin_unlock_irq(&hw->hw_lock); napi_enable(&skge->napi); skge_set_multicast(dev); return 0; free_tx_ring: kfree(skge->tx_ring.start); free_rx_ring: skge_rx_clean(skge); kfree(skge->rx_ring.start); free_pci_mem: dma_free_coherent(&hw->pdev->dev, skge->mem_size, skge->mem, skge->dma); skge->mem = NULL; return err; } /* stop receiver */ static void skge_rx_stop(struct skge_hw *hw, int port) { skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP); skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL), RB_RST_SET|RB_DIS_OP_MD); skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET); } static int skge_down(struct net_device *dev) { struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; int port = skge->port; if (!skge->mem) return 0; netif_info(skge, ifdown, skge->netdev, "disabling interface\n"); netif_tx_disable(dev); if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC) del_timer_sync(&skge->link_timer); napi_disable(&skge->napi); netif_carrier_off(dev); spin_lock_irq(&hw->hw_lock); hw->intr_mask &= ~portmask[port]; skge_write32(hw, B0_IMSK, (hw->ports == 1) ? 0 : hw->intr_mask); skge_read32(hw, B0_IMSK); spin_unlock_irq(&hw->hw_lock); if (hw->ports == 1) free_irq(hw->pdev->irq, hw); skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_REG_OFF); if (is_genesis(hw)) genesis_stop(skge); else yukon_stop(skge); /* Stop transmitter */ skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET|RB_DIS_OP_MD); /* Disable Force Sync bit and Enable Alloc bit */ skge_write8(hw, SK_REG(port, TXA_CTRL), TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); /* Stop Interval Timer and Limit Counter of Tx Arbiter */ skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L); skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L); /* Reset PCI FIFO */ skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET); skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET); /* Reset the RAM Buffer async Tx queue */ skge_write8(hw, RB_ADDR(port == 0 ? 
Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET); skge_rx_stop(hw, port); if (is_genesis(hw)) { skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET); skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET); } else { skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); } skge_led(skge, LED_MODE_OFF); netif_tx_lock_bh(dev); skge_tx_clean(dev); netif_tx_unlock_bh(dev); skge_rx_clean(skge); kfree(skge->rx_ring.start); kfree(skge->tx_ring.start); dma_free_coherent(&hw->pdev->dev, skge->mem_size, skge->mem, skge->dma); skge->mem = NULL; return 0; } static inline int skge_avail(const struct skge_ring *ring) { smp_mb(); return ((ring->to_clean > ring->to_use) ? 0 : ring->count) + (ring->to_clean - ring->to_use) - 1; } static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) { struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; struct skge_element *e; struct skge_tx_desc *td; int i; u32 control, len; dma_addr_t map; if (skb_padto(skb, ETH_ZLEN)) return NETDEV_TX_OK; if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) return NETDEV_TX_BUSY; e = skge->tx_ring.to_use; td = e->desc; BUG_ON(td->control & BMU_OWN); e->skb = skb; len = skb_headlen(skb); map = dma_map_single(&hw->pdev->dev, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(&hw->pdev->dev, map)) goto mapping_error; dma_unmap_addr_set(e, mapaddr, map); dma_unmap_len_set(e, maplen, len); td->dma_lo = lower_32_bits(map); td->dma_hi = upper_32_bits(map); if (skb->ip_summed == CHECKSUM_PARTIAL) { const int offset = skb_checksum_start_offset(skb); /* This seems backwards, but it is what the sk98lin * does. Looks like hardware is wrong? */ if (ipip_hdr(skb)->protocol == IPPROTO_UDP && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON) control = BMU_TCP_CHECK; else control = BMU_UDP_CHECK; td->csum_offs = 0; td->csum_start = offset; td->csum_write = offset + skb->csum_offset; } else control = BMU_CHECK; if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. 
no fragments */ control |= BMU_EOF | BMU_IRQ_EOF; else { struct skge_tx_desc *tf = td; control |= BMU_STFWD; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (dma_mapping_error(&hw->pdev->dev, map)) goto mapping_unwind; e = e->next; e->skb = skb; tf = e->desc; BUG_ON(tf->control & BMU_OWN); tf->dma_lo = lower_32_bits(map); tf->dma_hi = upper_32_bits(map); dma_unmap_addr_set(e, mapaddr, map); dma_unmap_len_set(e, maplen, skb_frag_size(frag)); tf->control = BMU_OWN | BMU_SW | control | skb_frag_size(frag); } tf->control |= BMU_EOF | BMU_IRQ_EOF; } /* Make sure all the descriptors are written */ wmb(); td->control = BMU_OWN | BMU_SW | BMU_STF | control | len; wmb(); netdev_sent_queue(dev, skb->len); skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START); netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev, "tx queued, slot %td, len %d\n", e - skge->tx_ring.start, skb->len); skge->tx_ring.to_use = e->next; smp_wmb(); if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) { netdev_dbg(dev, "transmit queue full\n"); netif_stop_queue(dev); } return NETDEV_TX_OK; mapping_unwind: e = skge->tx_ring.to_use; dma_unmap_single(&hw->pdev->dev, dma_unmap_addr(e, mapaddr), dma_unmap_len(e, maplen), DMA_TO_DEVICE); while (i-- > 0) { e = e->next; dma_unmap_page(&hw->pdev->dev, dma_unmap_addr(e, mapaddr), dma_unmap_len(e, maplen), DMA_TO_DEVICE); } mapping_error: if (net_ratelimit()) dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } /* Free resources associated with this ring element */ static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e, u32 control) { /* skb header vs. 
fragment */ if (control & BMU_STF) dma_unmap_single(&pdev->dev, dma_unmap_addr(e, mapaddr), dma_unmap_len(e, maplen), DMA_TO_DEVICE); else dma_unmap_page(&pdev->dev, dma_unmap_addr(e, mapaddr), dma_unmap_len(e, maplen), DMA_TO_DEVICE); } /* Free all buffers in transmit ring */ static void skge_tx_clean(struct net_device *dev) { struct skge_port *skge = netdev_priv(dev); struct skge_element *e; for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { struct skge_tx_desc *td = e->desc; skge_tx_unmap(skge->hw->pdev, e, td->control); if (td->control & BMU_EOF) dev_kfree_skb(e->skb); td->control = 0; } netdev_reset_queue(dev); skge->tx_ring.to_clean = e; } static void skge_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct skge_port *skge = netdev_priv(dev); netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n"); skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP); skge_tx_clean(dev); netif_wake_queue(dev); } static int skge_change_mtu(struct net_device *dev, int new_mtu) { int err; if (!netif_running(dev)) { dev->mtu = new_mtu; return 0; } skge_down(dev); dev->mtu = new_mtu; err = skge_up(dev); if (err) dev_close(dev); return err; } static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 }; static void genesis_add_filter(u8 filter[8], const u8 *addr) { u32 crc, bit; crc = ether_crc_le(ETH_ALEN, addr); bit = ~crc & 0x3f; filter[bit/8] |= 1 << (bit%8); } static void genesis_set_multicast(struct net_device *dev) { struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; int port = skge->port; struct netdev_hw_addr *ha; u32 mode; u8 filter[8]; mode = xm_read32(hw, port, XM_MODE); mode |= XM_MD_ENA_HASH; if (dev->flags & IFF_PROMISC) mode |= XM_MD_ENA_PROM; else mode &= ~XM_MD_ENA_PROM; if (dev->flags & IFF_ALLMULTI) memset(filter, 0xff, sizeof(filter)); else { memset(filter, 0, sizeof(filter)); if (skge->flow_status == FLOW_STAT_REM_SEND || skge->flow_status == FLOW_STAT_SYMMETRIC) genesis_add_filter(filter, pause_mc_addr); netdev_for_each_mc_addr(ha, dev) genesis_add_filter(filter, ha->addr); } xm_write32(hw, port, XM_MODE, mode); xm_outhash(hw, port, XM_HSM, filter); } static void yukon_add_filter(u8 filter[8], const u8 *addr) { u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f; filter[bit / 8] |= 1 << (bit % 8); } static void yukon_set_multicast(struct net_device *dev) { struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; int port = skge->port; struct netdev_hw_addr *ha; int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND || skge->flow_status == FLOW_STAT_SYMMETRIC); u16 reg; u8 filter[8]; memset(filter, 0, sizeof(filter)); reg = gma_read16(hw, port, GM_RX_CTRL); reg |= GM_RXCR_UCF_ENA; if (dev->flags & IFF_PROMISC) /* promiscuous */ reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); else if (dev->flags & IFF_ALLMULTI) /* all multicast */ memset(filter, 0xff, sizeof(filter)); else if (netdev_mc_empty(dev) && !rx_pause)/* no multicast */ reg &= ~GM_RXCR_MCF_ENA; else { reg |= GM_RXCR_MCF_ENA; if (rx_pause) yukon_add_filter(filter, pause_mc_addr); netdev_for_each_mc_addr(ha, dev) yukon_add_filter(filter, ha->addr); } gma_write16(hw, port, GM_MC_ADDR_H1, (u16)filter[0] | ((u16)filter[1] << 8)); gma_write16(hw, port, GM_MC_ADDR_H2, (u16)filter[2] | ((u16)filter[3] << 8)); gma_write16(hw, port, GM_MC_ADDR_H3, (u16)filter[4] | ((u16)filter[5] << 8)); gma_write16(hw, port, GM_MC_ADDR_H4, (u16)filter[6] | ((u16)filter[7] << 8)); gma_write16(hw, port, GM_RX_CTRL, reg); } static inline u16 
phy_length(const struct skge_hw *hw, u32 status) { if (is_genesis(hw)) return status >> XMR_FS_LEN_SHIFT; else return status >> GMR_FS_LEN_SHIFT; } static inline int bad_phy_status(const struct skge_hw *hw, u32 status) { if (is_genesis(hw)) return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0; else return (status & GMR_FS_ANY_ERR) || (status & GMR_FS_RX_OK) == 0; } static void skge_set_multicast(struct net_device *dev) { struct skge_port *skge = netdev_priv(dev); if (is_genesis(skge->hw)) genesis_set_multicast(dev); else yukon_set_multicast(dev); } /* Get receive buffer from descriptor. * Handles copy of small buffers and reallocation failures */ static struct sk_buff *skge_rx_get(struct net_device *dev, struct skge_element *e, u32 control, u32 status, u16 csum) { struct skge_port *skge = netdev_priv(dev); struct sk_buff *skb; u16 len = control & BMU_BBC; netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev, "rx slot %td status 0x%x len %d\n", e - skge->rx_ring.start, status, len); if (len > skge->rx_buf_size) goto error; if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)) goto error; if (bad_phy_status(skge->hw, status)) goto error; if (phy_length(skge->hw, status) != len) goto error; if (len < RX_COPY_THRESHOLD) { skb = netdev_alloc_skb_ip_align(dev, len); if (!skb) goto resubmit; dma_sync_single_for_cpu(&skge->hw->pdev->dev, dma_unmap_addr(e, mapaddr), dma_unmap_len(e, maplen), DMA_FROM_DEVICE); skb_copy_from_linear_data(e->skb, skb->data, len); dma_sync_single_for_device(&skge->hw->pdev->dev, dma_unmap_addr(e, mapaddr), dma_unmap_len(e, maplen), DMA_FROM_DEVICE); skge_rx_reuse(e, skge->rx_buf_size); } else { struct skge_element ee; struct sk_buff *nskb; nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size); if (!nskb) goto resubmit; ee = *e; skb = ee.skb; prefetch(skb->data); if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { dev_kfree_skb(nskb); goto resubmit; } dma_unmap_single(&skge->hw->pdev->dev, dma_unmap_addr(&ee, mapaddr), dma_unmap_len(&ee, maplen), DMA_FROM_DEVICE); } skb_put(skb, len); if (dev->features & NETIF_F_RXCSUM) { skb->csum = le16_to_cpu(csum); skb->ip_summed = CHECKSUM_COMPLETE; } skb->protocol = eth_type_trans(skb, dev); return skb; error: netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev, "rx err, slot %td control 0x%x status 0x%x\n", e - skge->rx_ring.start, control, status); if (is_genesis(skge->hw)) { if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) dev->stats.rx_length_errors++; if (status & XMR_FS_FRA_ERR) dev->stats.rx_frame_errors++; if (status & XMR_FS_FCS_ERR) dev->stats.rx_crc_errors++; } else { if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) dev->stats.rx_length_errors++; if (status & GMR_FS_FRAGMENT) dev->stats.rx_frame_errors++; if (status & GMR_FS_CRC_ERR) dev->stats.rx_crc_errors++; } resubmit: skge_rx_reuse(e, skge->rx_buf_size); return NULL; } /* Free all buffers in Tx ring which are no longer owned by device */ static void skge_tx_done(struct net_device *dev) { struct skge_port *skge = netdev_priv(dev); struct skge_ring *ring = &skge->tx_ring; struct skge_element *e; unsigned int bytes_compl = 0, pkts_compl = 0; skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); for (e = ring->to_clean; e != ring->to_use; e = e->next) { u32 control = ((const struct skge_tx_desc *) e->desc)->control; if (control & BMU_OWN) break; skge_tx_unmap(skge->hw->pdev, e, control); if (control & BMU_EOF) { netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev, "tx done slot %td\n", e - skge->tx_ring.start); pkts_compl++; bytes_compl += 
e->skb->len; dev_consume_skb_any(e->skb); } } netdev_completed_queue(dev, pkts_compl, bytes_compl); skge->tx_ring.to_clean = e; /* Can run lockless until we need to synchronize to restart queue. */ smp_mb(); if (unlikely(netif_queue_stopped(dev) && skge_avail(&skge->tx_ring) > TX_LOW_WATER)) { netif_tx_lock(dev); if (unlikely(netif_queue_stopped(dev) && skge_avail(&skge->tx_ring) > TX_LOW_WATER)) { netif_wake_queue(dev); } netif_tx_unlock(dev); } } static int skge_poll(struct napi_struct *napi, int budget) { struct skge_port *skge = container_of(napi, struct skge_port, napi); struct net_device *dev = skge->netdev; struct skge_hw *hw = skge->hw; struct skge_ring *ring = &skge->rx_ring; struct skge_element *e; int work_done = 0; skge_tx_done(dev); skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); for (e = ring->to_clean; prefetch(e->next), work_done < budget; e = e->next) { struct skge_rx_desc *rd = e->desc; struct sk_buff *skb; u32 control; rmb(); control = rd->control; if (control & BMU_OWN) break; skb = skge_rx_get(dev, e, control, rd->status, rd->csum2); if (likely(skb)) { napi_gro_receive(napi, skb); ++work_done; } } ring->to_clean = e; /* restart receiver */ wmb(); skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START); if (work_done < budget && napi_complete_done(napi, work_done)) { unsigned long flags; spin_lock_irqsave(&hw->hw_lock, flags); hw->intr_mask |= napimask[skge->port]; skge_write32(hw, B0_IMSK, hw->intr_mask); skge_read32(hw, B0_IMSK); spin_unlock_irqrestore(&hw->hw_lock, flags); } return work_done; } /* Parity errors seem to happen when Genesis is connected to a switch * with no other ports present. Heartbeat error?? */ static void skge_mac_parity(struct skge_hw *hw, int port) { struct net_device *dev = hw->dev[port]; ++dev->stats.tx_heartbeat_errors; if (is_genesis(hw)) skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_PERR); else /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */ skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0) ? 
GMF_CLI_TX_FC : GMF_CLI_TX_PE); } static void skge_mac_intr(struct skge_hw *hw, int port) { if (is_genesis(hw)) genesis_mac_intr(hw, port); else yukon_mac_intr(hw, port); } /* Handle device specific framing and timeout interrupts */ static void skge_error_irq(struct skge_hw *hw) { struct pci_dev *pdev = hw->pdev; u32 hwstatus = skge_read32(hw, B0_HWE_ISRC); if (is_genesis(hw)) { /* clear xmac errors */ if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1)) skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT); if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2)) skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT); } else { /* Timestamp (unused) overflow */ if (hwstatus & IS_IRQ_TIST_OV) skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); } if (hwstatus & IS_RAM_RD_PAR) { dev_err(&pdev->dev, "Ram read data parity error\n"); skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR); } if (hwstatus & IS_RAM_WR_PAR) { dev_err(&pdev->dev, "Ram write data parity error\n"); skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR); } if (hwstatus & IS_M1_PAR_ERR) skge_mac_parity(hw, 0); if (hwstatus & IS_M2_PAR_ERR) skge_mac_parity(hw, 1); if (hwstatus & IS_R1_PAR_ERR) { dev_err(&pdev->dev, "%s: receive queue parity error\n", hw->dev[0]->name); skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P); } if (hwstatus & IS_R2_PAR_ERR) { dev_err(&pdev->dev, "%s: receive queue parity error\n", hw->dev[1]->name); skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P); } if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) { u16 pci_status, pci_cmd; pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); pci_read_config_word(pdev, PCI_STATUS, &pci_status); dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n", pci_cmd, pci_status); /* Write the error bits back to clear them. */ pci_status &= PCI_STATUS_ERROR_BITS; skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); pci_write_config_word(pdev, PCI_COMMAND, pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY); pci_write_config_word(pdev, PCI_STATUS, pci_status); skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); /* if error still set then just ignore it */ hwstatus = skge_read32(hw, B0_HWE_ISRC); if (hwstatus & IS_IRQ_STAT) { dev_warn(&hw->pdev->dev, "unable to clear error (so ignoring them)\n"); hw->intr_mask &= ~IS_HW_ERR; } } } /* * Interrupt from PHY are handled in tasklet (softirq) * because accessing phy registers requires spin wait which might * cause excess interrupt latency. 
*/ static void skge_extirq(struct tasklet_struct *t) { struct skge_hw *hw = from_tasklet(hw, t, phy_task); int port; for (port = 0; port < hw->ports; port++) { struct net_device *dev = hw->dev[port]; if (netif_running(dev)) { struct skge_port *skge = netdev_priv(dev); spin_lock(&hw->phy_lock); if (!is_genesis(hw)) yukon_phy_intr(skge); else if (hw->phy_type == SK_PHY_BCOM) bcom_phy_intr(skge); spin_unlock(&hw->phy_lock); } } spin_lock_irq(&hw->hw_lock); hw->intr_mask |= IS_EXT_REG; skge_write32(hw, B0_IMSK, hw->intr_mask); skge_read32(hw, B0_IMSK); spin_unlock_irq(&hw->hw_lock); } static irqreturn_t skge_intr(int irq, void *dev_id) { struct skge_hw *hw = dev_id; u32 status; int handled = 0; spin_lock(&hw->hw_lock); /* Reading this register masks IRQ */ status = skge_read32(hw, B0_SP_ISRC); if (status == 0 || status == ~0) goto out; handled = 1; status &= hw->intr_mask; if (status & IS_EXT_REG) { hw->intr_mask &= ~IS_EXT_REG; tasklet_schedule(&hw->phy_task); } if (status & (IS_XA1_F|IS_R1_F)) { struct skge_port *skge = netdev_priv(hw->dev[0]); hw->intr_mask &= ~(IS_XA1_F|IS_R1_F); napi_schedule(&skge->napi); } if (status & IS_PA_TO_TX1) skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1); if (status & IS_PA_TO_RX1) { ++hw->dev[0]->stats.rx_over_errors; skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1); } if (status & IS_MAC1) skge_mac_intr(hw, 0); if (hw->dev[1]) { struct skge_port *skge = netdev_priv(hw->dev[1]); if (status & (IS_XA2_F|IS_R2_F)) { hw->intr_mask &= ~(IS_XA2_F|IS_R2_F); napi_schedule(&skge->napi); } if (status & IS_PA_TO_RX2) { ++hw->dev[1]->stats.rx_over_errors; skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2); } if (status & IS_PA_TO_TX2) skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2); if (status & IS_MAC2) skge_mac_intr(hw, 1); } if (status & IS_HW_ERR) skge_error_irq(hw); out: skge_write32(hw, B0_IMSK, hw->intr_mask); skge_read32(hw, B0_IMSK); spin_unlock(&hw->hw_lock); return IRQ_RETVAL(handled); } #ifdef CONFIG_NET_POLL_CONTROLLER static void skge_netpoll(struct net_device *dev) { struct skge_port *skge = netdev_priv(dev); disable_irq(dev->irq); skge_intr(dev->irq, skge->hw); enable_irq(dev->irq); } #endif static int skge_set_mac_address(struct net_device *dev, void *p) { struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; unsigned port = skge->port; const struct sockaddr *addr = p; u16 ctrl; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; eth_hw_addr_set(dev, addr->sa_data); if (!netif_running(dev)) { memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN); } else { /* disable Rx */ spin_lock_bh(&hw->phy_lock); ctrl = gma_read16(hw, port, GM_GP_CTRL); gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA); memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN); if (is_genesis(hw)) xm_outaddr(hw, port, XM_SA, dev->dev_addr); else { gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr); gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr); } gma_write16(hw, port, GM_GP_CTRL, ctrl); spin_unlock_bh(&hw->phy_lock); } return 0; } static const struct { u8 id; const char *name; } skge_chips[] = { { CHIP_ID_GENESIS, "Genesis" }, { CHIP_ID_YUKON, "Yukon" }, { CHIP_ID_YUKON_LITE, "Yukon-Lite"}, { CHIP_ID_YUKON_LP, "Yukon-LP"}, }; static const char *skge_board_name(const struct skge_hw *hw) { int i; static char buf[16]; for (i = 0; i < ARRAY_SIZE(skge_chips); i++) if (skge_chips[i].id == hw->chip_id) 
return skge_chips[i].name; snprintf(buf, sizeof(buf), "chipid 0x%x", hw->chip_id); return buf; } /* * Setup the board data structure, but don't bring up * the port(s) */ static int skge_reset(struct skge_hw *hw) { u32 reg; u16 ctst, pci_status; u8 t8, mac_cfg, pmd_type; int i; ctst = skge_read16(hw, B0_CTST); /* do a SW reset */ skge_write8(hw, B0_CTST, CS_RST_SET); skge_write8(hw, B0_CTST, CS_RST_CLR); /* clear PCI errors, if any */ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); skge_write8(hw, B2_TST_CTRL2, 0); pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status); pci_write_config_word(hw->pdev, PCI_STATUS, pci_status | PCI_STATUS_ERROR_BITS); skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); skge_write8(hw, B0_CTST, CS_MRST_CLR); /* restore CLK_RUN bits (for Yukon-Lite) */ skge_write16(hw, B0_CTST, ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA)); hw->chip_id = skge_read8(hw, B2_CHIP_ID); hw->phy_type = skge_read8(hw, B2_E_1) & 0xf; pmd_type = skge_read8(hw, B2_PMD_TYP); hw->copper = (pmd_type == 'T' || pmd_type == '1'); switch (hw->chip_id) { case CHIP_ID_GENESIS: #ifdef CONFIG_SKGE_GENESIS switch (hw->phy_type) { case SK_PHY_XMAC: hw->phy_addr = PHY_ADDR_XMAC; break; case SK_PHY_BCOM: hw->phy_addr = PHY_ADDR_BCOM; break; default: dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n", hw->phy_type); return -EOPNOTSUPP; } break; #else dev_err(&hw->pdev->dev, "Genesis chip detected but not configured\n"); return -EOPNOTSUPP; #endif case CHIP_ID_YUKON: case CHIP_ID_YUKON_LITE: case CHIP_ID_YUKON_LP: if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S') hw->copper = 1; hw->phy_addr = PHY_ADDR_MARV; break; default: dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n", hw->chip_id); return -EOPNOTSUPP; } mac_cfg = skge_read8(hw, B2_MAC_CFG); hw->ports = (mac_cfg & CFG_SNG_MAC) ? 
1 : 2; hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4; /* read the adapters RAM size */ t8 = skge_read8(hw, B2_E_0); if (is_genesis(hw)) { if (t8 == 3) { /* special case: 4 x 64k x 36, offset = 0x80000 */ hw->ram_size = 0x100000; hw->ram_offset = 0x80000; } else hw->ram_size = t8 * 512; } else if (t8 == 0) hw->ram_size = 0x20000; else hw->ram_size = t8 * 4096; hw->intr_mask = IS_HW_ERR; /* Use PHY IRQ for all but fiber based Genesis board */ if (!(is_genesis(hw) && hw->phy_type == SK_PHY_XMAC)) hw->intr_mask |= IS_EXT_REG; if (is_genesis(hw)) genesis_init(hw); else { /* switch power to VCC (WA for VAUX problem) */ skge_write8(hw, B0_POWER_CTRL, PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); /* avoid boards with stuck Hardware error bits */ if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) && (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) { dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n"); hw->intr_mask &= ~IS_HW_ERR; } /* Clear PHY COMA */ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg); reg &= ~PCI_PHY_COMA; pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg); skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); for (i = 0; i < hw->ports; i++) { skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); } } /* turn off hardware timer (unused) */ skge_write8(hw, B2_TI_CTRL, TIM_STOP); skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ); skge_write8(hw, B0_LED, LED_STAT_ON); /* enable the Tx Arbiters */ for (i = 0; i < hw->ports; i++) skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB); /* Initialize ram interface */ skge_write16(hw, B3_RI_CTRL, RI_RST_CLR); skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53); skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53); skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53); skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53); skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53); skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53); skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53); skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53); skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53); skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53); skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53); skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53); skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK); /* Set interrupt moderation for Transmit only * Receive interrupts avoided by NAPI */ skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F); skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100)); skge_write32(hw, B2_IRQM_CTRL, TIM_START); /* Leave irq disabled until first port is brought up. 
*/ skge_write32(hw, B0_IMSK, 0); for (i = 0; i < hw->ports; i++) { if (is_genesis(hw)) genesis_reset(hw, i); else yukon_reset(hw, i); } return 0; } #ifdef CONFIG_SKGE_DEBUG static struct dentry *skge_debug; static int skge_debug_show(struct seq_file *seq, void *v) { struct net_device *dev = seq->private; const struct skge_port *skge = netdev_priv(dev); const struct skge_hw *hw = skge->hw; const struct skge_element *e; if (!netif_running(dev)) return -ENETDOWN; seq_printf(seq, "IRQ src=%x mask=%x\n", skge_read32(hw, B0_ISRC), skge_read32(hw, B0_IMSK)); seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring)); for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { const struct skge_tx_desc *t = e->desc; seq_printf(seq, "%#x dma=%#x%08x %#x csum=%#x/%x/%x\n", t->control, t->dma_hi, t->dma_lo, t->status, t->csum_offs, t->csum_write, t->csum_start); } seq_puts(seq, "\nRx Ring:\n"); for (e = skge->rx_ring.to_clean; ; e = e->next) { const struct skge_rx_desc *r = e->desc; if (r->control & BMU_OWN) break; seq_printf(seq, "%#x dma=%#x%08x %#x %#x csum=%#x/%x\n", r->control, r->dma_hi, r->dma_lo, r->status, r->timestamp, r->csum1, r->csum1_start); } return 0; } DEFINE_SHOW_ATTRIBUTE(skge_debug); /* * Use network device events to create/remove/rename * debugfs file entries */ static int skge_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct skge_port *skge; if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug) goto done; skge = netdev_priv(dev); switch (event) { case NETDEV_CHANGENAME: if (skge->debugfs) skge->debugfs = debugfs_rename(skge_debug, skge->debugfs, skge_debug, dev->name); break; case NETDEV_GOING_DOWN: debugfs_remove(skge->debugfs); skge->debugfs = NULL; break; case NETDEV_UP: skge->debugfs = debugfs_create_file(dev->name, 0444, skge_debug, dev, &skge_debug_fops); break; } done: return NOTIFY_DONE; } static struct notifier_block skge_notifier = { .notifier_call = skge_device_event, }; static __init void skge_debug_init(void) { skge_debug = debugfs_create_dir("skge", NULL); register_netdevice_notifier(&skge_notifier); } static __exit void skge_debug_cleanup(void) { if (skge_debug) { unregister_netdevice_notifier(&skge_notifier); debugfs_remove(skge_debug); skge_debug = NULL; } } #else #define skge_debug_init() #define skge_debug_cleanup() #endif static const struct net_device_ops skge_netdev_ops = { .ndo_open = skge_up, .ndo_stop = skge_down, .ndo_start_xmit = skge_xmit_frame, .ndo_eth_ioctl = skge_ioctl, .ndo_get_stats = skge_get_stats, .ndo_tx_timeout = skge_tx_timeout, .ndo_change_mtu = skge_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = skge_set_multicast, .ndo_set_mac_address = skge_set_mac_address, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = skge_netpoll, #endif }; /* Initialize network device */ static struct net_device *skge_devinit(struct skge_hw *hw, int port, int highmem) { struct skge_port *skge; struct net_device *dev = alloc_etherdev(sizeof(*skge)); u8 addr[ETH_ALEN]; if (!dev) return NULL; SET_NETDEV_DEV(dev, &hw->pdev->dev); dev->netdev_ops = &skge_netdev_ops; dev->ethtool_ops = &skge_ethtool_ops; dev->watchdog_timeo = TX_WATCHDOG; dev->irq = hw->pdev->irq; /* MTU range: 60 - 9000 */ dev->min_mtu = ETH_ZLEN; dev->max_mtu = ETH_JUMBO_MTU; if (highmem) dev->features |= NETIF_F_HIGHDMA; skge = netdev_priv(dev); netif_napi_add(dev, &skge->napi, skge_poll); skge->netdev = dev; skge->hw = hw; skge->msg_enable = 
netif_msg_init(debug, default_msg); skge->tx_ring.count = DEFAULT_TX_RING_SIZE; skge->rx_ring.count = DEFAULT_RX_RING_SIZE; /* Auto speed and flow control */ skge->autoneg = AUTONEG_ENABLE; skge->flow_control = FLOW_MODE_SYM_OR_REM; skge->duplex = -1; skge->speed = -1; skge->advertising = skge_supported_modes(hw); if (device_can_wakeup(&hw->pdev->dev)) { skge->wol = wol_supported(hw) & WAKE_MAGIC; device_set_wakeup_enable(&hw->pdev->dev, skge->wol); } hw->dev[port] = dev; skge->port = port; /* Only used for Genesis XMAC */ if (is_genesis(hw)) timer_setup(&skge->link_timer, xm_link_timer, 0); else { dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_RXCSUM; dev->features |= dev->hw_features; } /* read the mac address */ memcpy_fromio(addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); eth_hw_addr_set(dev, addr); return dev; } static void skge_show_addr(struct net_device *dev) { const struct skge_port *skge = netdev_priv(dev); netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr); } static int only_32bit_dma; static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev, *dev1; struct skge_hw *hw; int err, using_dac = 0; err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "cannot enable PCI device\n"); goto err_out; } err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_err(&pdev->dev, "cannot obtain PCI resources\n"); goto err_out_disable_pdev; } pci_set_master(pdev); if (!only_32bit_dma && !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { using_dac = 1; err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); } else if (!(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) { using_dac = 0; err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); } if (err) { dev_err(&pdev->dev, "no usable DMA configuration\n"); goto err_out_free_regions; } #ifdef __BIG_ENDIAN /* byte swap descriptors in hardware */ { u32 reg; pci_read_config_dword(pdev, PCI_DEV_REG2, &reg); reg |= PCI_REV_DESC; pci_write_config_dword(pdev, PCI_DEV_REG2, reg); } #endif err = -ENOMEM; /* space for skge@pci:0000:04:00.0 */ hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:") + strlen(pci_name(pdev)) + 1, GFP_KERNEL); if (!hw) goto err_out_free_regions; sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev)); hw->pdev = pdev; spin_lock_init(&hw->hw_lock); spin_lock_init(&hw->phy_lock); tasklet_setup(&hw->phy_task, skge_extirq); hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000); if (!hw->regs) { dev_err(&pdev->dev, "cannot map device registers\n"); goto err_out_free_hw; } err = skge_reset(hw); if (err) goto err_out_iounmap; pr_info("%s addr 0x%llx irq %d chip %s rev %d\n", DRV_VERSION, (unsigned long long)pci_resource_start(pdev, 0), pdev->irq, skge_board_name(hw), hw->chip_rev); dev = skge_devinit(hw, 0, using_dac); if (!dev) { err = -ENOMEM; goto err_out_led_off; } /* Some motherboards are broken and has zero in ROM. */ if (!is_valid_ether_addr(dev->dev_addr)) dev_warn(&pdev->dev, "bad (zero?) 
ethernet address in rom\n"); err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "cannot register net device\n"); goto err_out_free_netdev; } skge_show_addr(dev); if (hw->ports > 1) { dev1 = skge_devinit(hw, 1, using_dac); if (!dev1) { err = -ENOMEM; goto err_out_unregister; } err = register_netdev(dev1); if (err) { dev_err(&pdev->dev, "cannot register second net device\n"); goto err_out_free_dev1; } err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, hw->irq_name, hw); if (err) { dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); goto err_out_unregister_dev1; } skge_show_addr(dev1); } pci_set_drvdata(pdev, hw); return 0; err_out_unregister_dev1: unregister_netdev(dev1); err_out_free_dev1: free_netdev(dev1); err_out_unregister: unregister_netdev(dev); err_out_free_netdev: free_netdev(dev); err_out_led_off: skge_write16(hw, B0_LED, LED_STAT_OFF); err_out_iounmap: iounmap(hw->regs); err_out_free_hw: kfree(hw); err_out_free_regions: pci_release_regions(pdev); err_out_disable_pdev: pci_disable_device(pdev); err_out: return err; } static void skge_remove(struct pci_dev *pdev) { struct skge_hw *hw = pci_get_drvdata(pdev); struct net_device *dev0, *dev1; if (!hw) return; dev1 = hw->dev[1]; if (dev1) unregister_netdev(dev1); dev0 = hw->dev[0]; unregister_netdev(dev0); tasklet_kill(&hw->phy_task); spin_lock_irq(&hw->hw_lock); hw->intr_mask = 0; if (hw->ports > 1) { skge_write32(hw, B0_IMSK, 0); skge_read32(hw, B0_IMSK); } spin_unlock_irq(&hw->hw_lock); skge_write16(hw, B0_LED, LED_STAT_OFF); skge_write8(hw, B0_CTST, CS_RST_SET); if (hw->ports > 1) free_irq(pdev->irq, hw); pci_release_regions(pdev); pci_disable_device(pdev); if (dev1) free_netdev(dev1); free_netdev(dev0); iounmap(hw->regs); kfree(hw); } #ifdef CONFIG_PM_SLEEP static int skge_suspend(struct device *dev) { struct skge_hw *hw = dev_get_drvdata(dev); int i; if (!hw) return 0; for (i = 0; i < hw->ports; i++) { struct net_device *dev = hw->dev[i]; struct skge_port *skge = netdev_priv(dev); if (netif_running(dev)) skge_down(dev); if (skge->wol) skge_wol_init(skge); } skge_write32(hw, B0_IMSK, 0); return 0; } static int skge_resume(struct device *dev) { struct skge_hw *hw = dev_get_drvdata(dev); int i, err; if (!hw) return 0; err = skge_reset(hw); if (err) goto out; for (i = 0; i < hw->ports; i++) { struct net_device *dev = hw->dev[i]; if (netif_running(dev)) { err = skge_up(dev); if (err) { netdev_err(dev, "could not up: %d\n", err); dev_close(dev); goto out; } } } out: return err; } static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume); #define SKGE_PM_OPS (&skge_pm_ops) #else #define SKGE_PM_OPS NULL #endif /* CONFIG_PM_SLEEP */ static void skge_shutdown(struct pci_dev *pdev) { struct skge_hw *hw = pci_get_drvdata(pdev); int i; if (!hw) return; for (i = 0; i < hw->ports; i++) { struct net_device *dev = hw->dev[i]; struct skge_port *skge = netdev_priv(dev); if (skge->wol) skge_wol_init(skge); } pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); pci_set_power_state(pdev, PCI_D3hot); } static struct pci_driver skge_driver = { .name = DRV_NAME, .id_table = skge_id_table, .probe = skge_probe, .remove = skge_remove, .shutdown = skge_shutdown, .driver.pm = SKGE_PM_OPS, }; static const struct dmi_system_id skge_32bit_dma_boards[] = { { .ident = "Gigabyte nForce boards", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"), DMI_MATCH(DMI_BOARD_NAME, "nForce"), }, }, { .ident = "ASUS P5NSLI", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "P5NSLI") }, }, { 
.ident = "FUJITSU SIEMENS A8NE-FM", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "A8NE-FM") }, }, {} }; static int __init skge_init_module(void) { if (dmi_check_system(skge_32bit_dma_boards)) only_32bit_dma = 1; skge_debug_init(); return pci_register_driver(&skge_driver); } static void __exit skge_cleanup_module(void) { pci_unregister_driver(&skge_driver); skge_debug_cleanup(); } module_init(skge_init_module); module_exit(skge_cleanup_module);
linux-master
drivers/net/ethernet/marvell/skge.c
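For reference, the packet-RAM carve-up performed by skge_up() and skge_ramset() in the file above is simple integer arithmetic: the RAM left after ram_offset is split evenly into one RX and one TX window per port, the register values are expressed in 8-byte units, and each RX window gets pause thresholds at one third and two thirds of its length. The sketch below reruns that arithmetic in user space for a made-up 512 KiB, two-port configuration (the sizes are illustrative assumptions, not values read from any board) so the resulting register values can be inspected.

#include <stdio.h>

struct ram_window {
	unsigned int start;	/* quadword address written to RB_START */
	unsigned int end;	/* quadword address written to RB_END */
	unsigned int utpp;	/* RX upper pause threshold (RB_RX_UTPP), 0 for TX */
	unsigned int ltpp;	/* RX lower pause threshold (RB_RX_LTPP), 0 for TX */
};

/* Mirrors the address math in skge_ramset(); "is_rx" selects the
 * receive-queue threshold setup, TX queues use store-and-forward instead.
 */
static struct ram_window ramset(unsigned int start, unsigned int len, int is_rx)
{
	struct ram_window w = { 0, 0, 0, 0 };

	start /= 8;		/* the hardware counts in 8-byte units */
	len /= 8;
	w.start = start;
	w.end = start + len - 1;
	if (is_rx) {
		w.utpp = start + (2 * len) / 3;
		w.ltpp = start + len / 3;
	}
	return w;
}

int main(void)
{
	unsigned int ram_size = 512 * 1024;	/* assumed, not a probed value */
	unsigned int ram_offset = 0;
	int ports = 2;
	unsigned int chunk = (ram_size - ram_offset) / (ports * 2);
	int port;

	for (port = 0; port < ports; port++) {
		unsigned int ram_addr = ram_offset + 2 * chunk * port;
		struct ram_window rx = ramset(ram_addr, chunk, 1);
		struct ram_window tx = ramset(ram_addr + chunk, chunk, 0);

		printf("port %d: rx %#x-%#x utpp %#x ltpp %#x, tx %#x-%#x\n",
		       port, rx.start, rx.end, rx.utpp, rx.ltpp,
		       tx.start, tx.end);
	}
	return 0;
}

Splitting the windows equally regardless of traffic direction matches the "equally between ports and tx/rx" comment in skge_up(); the TX windows instead rely on RB_ENA_STFWD because, as the comment in skge_ramset() notes, the MAC transmit FIFOs are only 4K on Genesis and 1K on Yukon.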
// SPDX-License-Identifier: GPL-2.0-or-later /* * PXA168 ethernet driver. * Most of the code is derived from mv643xx ethernet driver. * * Copyright (C) 2010 Marvell International Ltd. * Sachin Sanap <[email protected]> * Zhangfei Gao <[email protected]> * Philip Rakity <[email protected]> * Mark Brown <[email protected]> */ #include <linux/bitops.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/in.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/ip.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_net.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/pxa168_eth.h> #include <linux/tcp.h> #include <linux/types.h> #include <linux/udp.h> #include <linux/workqueue.h> #include <linux/pgtable.h> #include <asm/cacheflush.h> #define DRIVER_NAME "pxa168-eth" #define DRIVER_VERSION "0.3" /* * Registers */ #define PHY_ADDRESS 0x0000 #define SMI 0x0010 #define PORT_CONFIG 0x0400 #define PORT_CONFIG_EXT 0x0408 #define PORT_COMMAND 0x0410 #define PORT_STATUS 0x0418 #define HTPR 0x0428 #define MAC_ADDR_LOW 0x0430 #define MAC_ADDR_HIGH 0x0438 #define SDMA_CONFIG 0x0440 #define SDMA_CMD 0x0448 #define INT_CAUSE 0x0450 #define INT_W_CLEAR 0x0454 #define INT_MASK 0x0458 #define ETH_F_RX_DESC_0 0x0480 #define ETH_C_RX_DESC_0 0x04A0 #define ETH_C_TX_DESC_1 0x04E4 /* smi register */ #define SMI_BUSY (1 << 28) /* 0 - Write, 1 - Read */ #define SMI_R_VALID (1 << 27) /* 0 - Write, 1 - Read */ #define SMI_OP_W (0 << 26) /* Write operation */ #define SMI_OP_R (1 << 26) /* Read operation */ #define PHY_WAIT_ITERATIONS 10 #define PXA168_ETH_PHY_ADDR_DEFAULT 0 /* RX & TX descriptor command */ #define BUF_OWNED_BY_DMA (1 << 31) /* RX descriptor status */ #define RX_EN_INT (1 << 23) #define RX_FIRST_DESC (1 << 17) #define RX_LAST_DESC (1 << 16) #define RX_ERROR (1 << 15) /* TX descriptor command */ #define TX_EN_INT (1 << 23) #define TX_GEN_CRC (1 << 22) #define TX_ZERO_PADDING (1 << 18) #define TX_FIRST_DESC (1 << 17) #define TX_LAST_DESC (1 << 16) #define TX_ERROR (1 << 15) /* SDMA_CMD */ #define SDMA_CMD_AT (1 << 31) #define SDMA_CMD_TXDL (1 << 24) #define SDMA_CMD_TXDH (1 << 23) #define SDMA_CMD_AR (1 << 15) #define SDMA_CMD_ERD (1 << 7) /* Bit definitions of the Port Config Reg */ #define PCR_DUPLEX_FULL (1 << 15) #define PCR_HS (1 << 12) #define PCR_EN (1 << 7) #define PCR_PM (1 << 0) /* Bit definitions of the Port Config Extend Reg */ #define PCXR_2BSM (1 << 28) #define PCXR_DSCP_EN (1 << 21) #define PCXR_RMII_EN (1 << 20) #define PCXR_AN_SPEED_DIS (1 << 19) #define PCXR_SPEED_100 (1 << 18) #define PCXR_MFL_1518 (0 << 14) #define PCXR_MFL_1536 (1 << 14) #define PCXR_MFL_2048 (2 << 14) #define PCXR_MFL_64K (3 << 14) #define PCXR_FLOWCTL_DIS (1 << 12) #define PCXR_FLP (1 << 11) #define PCXR_AN_FLOWCTL_DIS (1 << 10) #define PCXR_AN_DUPLEX_DIS (1 << 9) #define PCXR_PRIO_TX_OFF 3 #define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF) /* Bit definitions of the SDMA Config Reg */ #define SDCR_BSZ_OFF 12 #define SDCR_BSZ8 (3 << SDCR_BSZ_OFF) #define SDCR_BSZ4 (2 << SDCR_BSZ_OFF) #define SDCR_BSZ2 (1 << SDCR_BSZ_OFF) #define SDCR_BSZ1 (0 << SDCR_BSZ_OFF) #define SDCR_BLMR (1 << 6) #define SDCR_BLMT (1 << 7) #define SDCR_RIFB (1 << 9) #define SDCR_RC_OFF 2 #define SDCR_RC_MAX_RETRANS (0xf << SDCR_RC_OFF) /* * Bit definitions of the Interrupt Cause Reg * and Interrupt MASK Reg is the same */ #define ICR_RXBUF (1 << 0) #define 
ICR_TXBUF_H (1 << 2) #define ICR_TXBUF_L (1 << 3) #define ICR_TXEND_H (1 << 6) #define ICR_TXEND_L (1 << 7) #define ICR_RXERR (1 << 8) #define ICR_TXERR_H (1 << 10) #define ICR_TXERR_L (1 << 11) #define ICR_TX_UDR (1 << 13) #define ICR_MII_CH (1 << 28) #define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\ ICR_TXERR_H | ICR_TXERR_L |\ ICR_TXEND_H | ICR_TXEND_L |\ ICR_RXBUF | ICR_RXERR | ICR_MII_CH) #define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */ #define NUM_RX_DESCS 64 #define NUM_TX_DESCS 64 #define HASH_ADD 0 #define HASH_DELETE 1 #define HASH_ADDR_TABLE_SIZE 0x4000 /* 16K (1/2K address - PCR_HS == 1) */ #define HOP_NUMBER 12 /* Bit definitions for Port status */ #define PORT_SPEED_100 (1 << 0) #define FULL_DUPLEX (1 << 1) #define FLOW_CONTROL_DISABLED (1 << 2) #define LINK_UP (1 << 3) /* Bit definitions for work to be done */ #define WORK_TX_DONE (1 << 1) /* * Misc definitions. */ #define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) struct rx_desc { u32 cmd_sts; /* Descriptor command status */ u16 byte_cnt; /* Descriptor buffer byte count */ u16 buf_size; /* Buffer size */ u32 buf_ptr; /* Descriptor buffer pointer */ u32 next_desc_ptr; /* Next descriptor pointer */ }; struct tx_desc { u32 cmd_sts; /* Command/status field */ u16 reserved; u16 byte_cnt; /* buffer byte count */ u32 buf_ptr; /* pointer to buffer for this descriptor */ u32 next_desc_ptr; /* Pointer to next descriptor */ }; struct pxa168_eth_private { struct platform_device *pdev; int port_num; /* User Ethernet port number */ int phy_addr; int phy_speed; int phy_duplex; phy_interface_t phy_intf; int rx_resource_err; /* Rx ring resource error flag */ /* Next available and first returning Rx resource */ int rx_curr_desc_q, rx_used_desc_q; /* Next available and first returning Tx resource */ int tx_curr_desc_q, tx_used_desc_q; struct rx_desc *p_rx_desc_area; dma_addr_t rx_desc_dma; int rx_desc_area_size; struct sk_buff **rx_skb; struct tx_desc *p_tx_desc_area; dma_addr_t tx_desc_dma; int tx_desc_area_size; struct sk_buff **tx_skb; struct work_struct tx_timeout_task; struct net_device *dev; struct napi_struct napi; u8 work_todo; int skb_size; /* Size of Tx Ring per queue */ int tx_ring_size; /* Number of tx descriptors in use */ int tx_desc_count; /* Size of Rx Ring per queue */ int rx_ring_size; /* Number of rx descriptors in use */ int rx_desc_count; /* * Used in case RX Ring is empty, which can occur when * system does not have resources (skb's) */ struct timer_list timeout; struct mii_bus *smi_bus; /* clock */ struct clk *clk; struct pxa168_eth_platform_data *pd; /* * Ethernet controller base address. 
*/ void __iomem *base; /* Pointer to the hardware address filter table */ void *htpr; dma_addr_t htpr_dma; }; struct addr_table_entry { __le32 lo; __le32 hi; }; /* Bit fields of a Hash Table Entry */ enum hash_table_entry { HASH_ENTRY_VALID = 1, SKIP = 2, HASH_ENTRY_RECEIVE_DISCARD = 4, HASH_ENTRY_RECEIVE_DISCARD_BIT = 2 }; static int pxa168_init_hw(struct pxa168_eth_private *pep); static int pxa168_init_phy(struct net_device *dev); static void eth_port_reset(struct net_device *dev); static void eth_port_start(struct net_device *dev); static int pxa168_eth_open(struct net_device *dev); static int pxa168_eth_stop(struct net_device *dev); static inline u32 rdl(struct pxa168_eth_private *pep, int offset) { return readl_relaxed(pep->base + offset); } static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data) { writel_relaxed(data, pep->base + offset); } static void abort_dma(struct pxa168_eth_private *pep) { int delay; int max_retries = 40; do { wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT); udelay(100); delay = 10; while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT)) && delay-- > 0) { udelay(10); } } while (max_retries-- > 0 && delay <= 0); if (max_retries <= 0) netdev_err(pep->dev, "%s : DMA Stuck\n", __func__); } static void rxq_refill(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); struct sk_buff *skb; struct rx_desc *p_used_rx_desc; int used_rx_desc; while (pep->rx_desc_count < pep->rx_ring_size) { int size; skb = netdev_alloc_skb(dev, pep->skb_size); if (!skb) break; if (SKB_DMA_REALIGN) skb_reserve(skb, SKB_DMA_REALIGN); pep->rx_desc_count++; /* Get 'used' Rx descriptor */ used_rx_desc = pep->rx_used_desc_q; p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc]; size = skb_end_pointer(skb) - skb->data; p_used_rx_desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data, size, DMA_FROM_DEVICE); p_used_rx_desc->buf_size = size; pep->rx_skb[used_rx_desc] = skb; /* Return the descriptor to DMA ownership */ dma_wmb(); p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT; dma_wmb(); /* Move the used descriptor pointer to the next descriptor */ pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size; /* Any Rx return cancels the Rx resource error status */ pep->rx_resource_err = 0; skb_reserve(skb, ETH_HW_IP_ALIGN); } /* * If RX ring is empty of SKB, set a timer to try allocating * again at a later time. */ if (pep->rx_desc_count == 0) { pep->timeout.expires = jiffies + (HZ / 10); add_timer(&pep->timeout); } } static inline void rxq_refill_timer_wrapper(struct timer_list *t) { struct pxa168_eth_private *pep = from_timer(pep, t, timeout); napi_schedule(&pep->napi); } static inline u8 flip_8_bits(u8 x) { return (((x) & 0x01) << 3) | (((x) & 0x02) << 1) | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3) | (((x) & 0x10) << 3) | (((x) & 0x20) << 1) | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3); } static void nibble_swap_every_byte(unsigned char *mac_addr) { int i; for (i = 0; i < ETH_ALEN; i++) { mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) | ((mac_addr[i] & 0xf0) >> 4); } } static void inverse_every_nibble(unsigned char *mac_addr) { int i; for (i = 0; i < ETH_ALEN; i++) mac_addr[i] = flip_8_bits(mac_addr[i]); } /* * ---------------------------------------------------------------------------- * This function will calculate the hash function of the address. * Inputs * mac_addr_orig - MAC address. * Outputs * return the calculated entry. 
*/ static u32 hash_function(const unsigned char *mac_addr_orig) { u32 hash_result; u32 addr0; u32 addr1; u32 addr2; u32 addr3; unsigned char mac_addr[ETH_ALEN]; /* Make a copy of MAC address since we are going to performe bit * operations on it */ memcpy(mac_addr, mac_addr_orig, ETH_ALEN); nibble_swap_every_byte(mac_addr); inverse_every_nibble(mac_addr); addr0 = (mac_addr[5] >> 2) & 0x3f; addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2); addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1; addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8); hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3); hash_result = hash_result & 0x07ff; return hash_result; } /* * ---------------------------------------------------------------------------- * This function will add/del an entry to the address table. * Inputs * pep - ETHERNET . * mac_addr - MAC address. * skip - if 1, skip this address.Used in case of deleting an entry which is a * part of chain in the hash table.We can't just delete the entry since * that will break the chain.We need to defragment the tables time to * time. * rd - 0 Discard packet upon match. * - 1 Receive packet upon match. * Outputs * address table entry is added/deleted. * 0 if success. * -ENOSPC if table full */ static int add_del_hash_entry(struct pxa168_eth_private *pep, const unsigned char *mac_addr, u32 rd, u32 skip, int del) { struct addr_table_entry *entry, *start; u32 new_high; u32 new_low; u32 i; new_low = (((mac_addr[1] >> 4) & 0xf) << 15) | (((mac_addr[1] >> 0) & 0xf) << 11) | (((mac_addr[0] >> 4) & 0xf) << 7) | (((mac_addr[0] >> 0) & 0xf) << 3) | (((mac_addr[3] >> 4) & 0x1) << 31) | (((mac_addr[3] >> 0) & 0xf) << 27) | (((mac_addr[2] >> 4) & 0xf) << 23) | (((mac_addr[2] >> 0) & 0xf) << 19) | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT) | HASH_ENTRY_VALID; new_high = (((mac_addr[5] >> 4) & 0xf) << 15) | (((mac_addr[5] >> 0) & 0xf) << 11) | (((mac_addr[4] >> 4) & 0xf) << 7) | (((mac_addr[4] >> 0) & 0xf) << 3) | (((mac_addr[3] >> 5) & 0x7) << 0); /* * Pick the appropriate table, start scanning for free/reusable * entries at the index obtained by hashing the specified MAC address */ start = pep->htpr; entry = start + hash_function(mac_addr); for (i = 0; i < HOP_NUMBER; i++) { if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) { break; } else { /* if same address put in same position */ if (((le32_to_cpu(entry->lo) & 0xfffffff8) == (new_low & 0xfffffff8)) && (le32_to_cpu(entry->hi) == new_high)) { break; } } if (entry == start + 0x7ff) entry = start; else entry++; } if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) && (le32_to_cpu(entry->hi) != new_high) && del) return 0; if (i == HOP_NUMBER) { if (!del) { netdev_info(pep->dev, "%s: table section is full, need to " "move to 16kB implementation?\n", __FILE__); return -ENOSPC; } else return 0; } /* * Update the selected entry */ if (del) { entry->hi = 0; entry->lo = 0; } else { entry->hi = cpu_to_le32(new_high); entry->lo = cpu_to_le32(new_low); } return 0; } /* * ---------------------------------------------------------------------------- * Create an addressTable entry from MAC address info * found in the specifed net_device struct * * Input : pointer to ethernet interface network device structure * Output : N/A */ static void update_hash_table_mac_address(struct pxa168_eth_private *pep, unsigned char *oaddr, const unsigned char *addr) { /* Delete old entry */ if (oaddr) add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE); /* Add new entry */ add_del_hash_entry(pep, addr, 1, 
0, HASH_ADD); } static int init_hash_table(struct pxa168_eth_private *pep) { /* * Hardware expects CPU to build a hash table based on a predefined * hash function and populate it based on hardware address. The * location of the hash table is identified by 32-bit pointer stored * in HTPR internal register. Two possible sizes exists for the hash * table 8kB (256kB of DRAM required (4 x 64 kB banks)) and 1/2kB * (16kB of DRAM required (4 x 4 kB banks)).We currently only support * 1/2kB. */ /* TODO: Add support for 8kB hash table and alternative hash * function.Driver can dynamically switch to them if the 1/2kB hash * table is full. */ if (!pep->htpr) { pep->htpr = dma_alloc_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE, &pep->htpr_dma, GFP_KERNEL); if (!pep->htpr) return -ENOMEM; } else { memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE); } wrl(pep, HTPR, pep->htpr_dma); return 0; } static void pxa168_eth_set_rx_mode(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); struct netdev_hw_addr *ha; u32 val; val = rdl(pep, PORT_CONFIG); if (dev->flags & IFF_PROMISC) val |= PCR_PM; else val &= ~PCR_PM; wrl(pep, PORT_CONFIG, val); /* * Remove the old list of MAC address and add dev->addr * and multicast address. */ memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE); update_hash_table_mac_address(pep, NULL, dev->dev_addr); netdev_for_each_mc_addr(ha, dev) update_hash_table_mac_address(pep, NULL, ha->addr); } static void pxa168_eth_get_mac_address(struct net_device *dev, unsigned char *addr) { struct pxa168_eth_private *pep = netdev_priv(dev); unsigned int mac_h = rdl(pep, MAC_ADDR_HIGH); unsigned int mac_l = rdl(pep, MAC_ADDR_LOW); addr[0] = (mac_h >> 24) & 0xff; addr[1] = (mac_h >> 16) & 0xff; addr[2] = (mac_h >> 8) & 0xff; addr[3] = mac_h & 0xff; addr[4] = (mac_l >> 8) & 0xff; addr[5] = mac_l & 0xff; } static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *sa = addr; struct pxa168_eth_private *pep = netdev_priv(dev); unsigned char oldMac[ETH_ALEN]; u32 mac_h, mac_l; if (!is_valid_ether_addr(sa->sa_data)) return -EADDRNOTAVAIL; memcpy(oldMac, dev->dev_addr, ETH_ALEN); eth_hw_addr_set(dev, sa->sa_data); mac_h = dev->dev_addr[0] << 24; mac_h |= dev->dev_addr[1] << 16; mac_h |= dev->dev_addr[2] << 8; mac_h |= dev->dev_addr[3]; mac_l = dev->dev_addr[4] << 8; mac_l |= dev->dev_addr[5]; wrl(pep, MAC_ADDR_HIGH, mac_h); wrl(pep, MAC_ADDR_LOW, mac_l); netif_addr_lock_bh(dev); update_hash_table_mac_address(pep, oldMac, dev->dev_addr); netif_addr_unlock_bh(dev); return 0; } static void eth_port_start(struct net_device *dev) { unsigned int val = 0; struct pxa168_eth_private *pep = netdev_priv(dev); int tx_curr_desc, rx_curr_desc; phy_start(dev->phydev); /* Assignment of Tx CTRP of given queue */ tx_curr_desc = pep->tx_curr_desc_q; wrl(pep, ETH_C_TX_DESC_1, (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc))); /* Assignment of Rx CRDP of given queue */ rx_curr_desc = pep->rx_curr_desc_q; wrl(pep, ETH_C_RX_DESC_0, (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc))); wrl(pep, ETH_F_RX_DESC_0, (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc))); /* Clear all interrupts */ wrl(pep, INT_CAUSE, 0); /* Enable all interrupts for receive, transmit and error. 
*/ wrl(pep, INT_MASK, ALL_INTS); val = rdl(pep, PORT_CONFIG); val |= PCR_EN; wrl(pep, PORT_CONFIG, val); /* Start RX DMA engine */ val = rdl(pep, SDMA_CMD); val |= SDMA_CMD_ERD; wrl(pep, SDMA_CMD, val); } static void eth_port_reset(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); unsigned int val = 0; /* Stop all interrupts for receive, transmit and error. */ wrl(pep, INT_MASK, 0); /* Clear all interrupts */ wrl(pep, INT_CAUSE, 0); /* Stop RX DMA */ val = rdl(pep, SDMA_CMD); val &= ~SDMA_CMD_ERD; /* abort dma command */ /* Abort any transmit and receive operations and put DMA * in idle state. */ abort_dma(pep); /* Disable port */ val = rdl(pep, PORT_CONFIG); val &= ~PCR_EN; wrl(pep, PORT_CONFIG, val); phy_stop(dev->phydev); } /* * txq_reclaim - Free the tx desc data for completed descriptors * If force is non-zero, frees uncompleted descriptors as well */ static int txq_reclaim(struct net_device *dev, int force) { struct pxa168_eth_private *pep = netdev_priv(dev); struct tx_desc *desc; u32 cmd_sts; struct sk_buff *skb; int tx_index; dma_addr_t addr; int count; int released = 0; netif_tx_lock(dev); pep->work_todo &= ~WORK_TX_DONE; while (pep->tx_desc_count > 0) { tx_index = pep->tx_used_desc_q; desc = &pep->p_tx_desc_area[tx_index]; cmd_sts = desc->cmd_sts; if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) { if (released > 0) { goto txq_reclaim_end; } else { released = -1; goto txq_reclaim_end; } } pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size; pep->tx_desc_count--; addr = desc->buf_ptr; count = desc->byte_cnt; skb = pep->tx_skb[tx_index]; if (skb) pep->tx_skb[tx_index] = NULL; if (cmd_sts & TX_ERROR) { if (net_ratelimit()) netdev_err(dev, "Error in TX\n"); dev->stats.tx_errors++; } dma_unmap_single(&pep->pdev->dev, addr, count, DMA_TO_DEVICE); if (skb) dev_kfree_skb_irq(skb); released++; } txq_reclaim_end: netif_tx_unlock(dev); return released; } static void pxa168_eth_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct pxa168_eth_private *pep = netdev_priv(dev); netdev_info(dev, "TX timeout desc_count %d\n", pep->tx_desc_count); schedule_work(&pep->tx_timeout_task); } static void pxa168_eth_tx_timeout_task(struct work_struct *work) { struct pxa168_eth_private *pep = container_of(work, struct pxa168_eth_private, tx_timeout_task); struct net_device *dev = pep->dev; pxa168_eth_stop(dev); pxa168_eth_open(dev); } static int rxq_process(struct net_device *dev, int budget) { struct pxa168_eth_private *pep = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; unsigned int received_packets = 0; struct sk_buff *skb; while (budget-- > 0) { int rx_next_curr_desc, rx_curr_desc, rx_used_desc; struct rx_desc *rx_desc; unsigned int cmd_sts; /* Do not process Rx ring in case of Rx ring resource error */ if (pep->rx_resource_err) break; rx_curr_desc = pep->rx_curr_desc_q; rx_used_desc = pep->rx_used_desc_q; rx_desc = &pep->p_rx_desc_area[rx_curr_desc]; cmd_sts = rx_desc->cmd_sts; dma_rmb(); if (cmd_sts & (BUF_OWNED_BY_DMA)) break; skb = pep->rx_skb[rx_curr_desc]; pep->rx_skb[rx_curr_desc] = NULL; rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size; pep->rx_curr_desc_q = rx_next_curr_desc; /* Rx descriptors exhausted. */ /* Set the Rx ring resource error flag */ if (rx_next_curr_desc == rx_used_desc) pep->rx_resource_err = 1; pep->rx_desc_count--; dma_unmap_single(&pep->pdev->dev, rx_desc->buf_ptr, rx_desc->buf_size, DMA_FROM_DEVICE); received_packets++; /* * Update statistics. 
* Note byte count includes 4 byte CRC count */ stats->rx_packets++; stats->rx_bytes += rx_desc->byte_cnt; /* * In case received a packet without first / last bits on OR * the error summary bit is on, the packets needs to be droped. */ if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != (RX_FIRST_DESC | RX_LAST_DESC)) || (cmd_sts & RX_ERROR)) { stats->rx_dropped++; if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != (RX_FIRST_DESC | RX_LAST_DESC)) { if (net_ratelimit()) netdev_err(dev, "Rx pkt on multiple desc\n"); } if (cmd_sts & RX_ERROR) stats->rx_errors++; dev_kfree_skb_irq(skb); } else { /* * The -4 is for the CRC in the trailer of the * received packet */ skb_put(skb, rx_desc->byte_cnt - 4); skb->protocol = eth_type_trans(skb, dev); netif_receive_skb(skb); } } /* Fill RX ring with skb's */ rxq_refill(dev); return received_packets; } static int pxa168_eth_collect_events(struct pxa168_eth_private *pep, struct net_device *dev) { u32 icr; int ret = 0; icr = rdl(pep, INT_CAUSE); if (icr == 0) return IRQ_NONE; wrl(pep, INT_CAUSE, ~icr); if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) { pep->work_todo |= WORK_TX_DONE; ret = 1; } if (icr & ICR_RXBUF) ret = 1; return ret; } static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct pxa168_eth_private *pep = netdev_priv(dev); if (unlikely(!pxa168_eth_collect_events(pep, dev))) return IRQ_NONE; /* Disable interrupts */ wrl(pep, INT_MASK, 0); napi_schedule(&pep->napi); return IRQ_HANDLED; } static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep) { int skb_size; /* * Reserve 2+14 bytes for an ethernet header (the hardware * automatically prepends 2 bytes of dummy data to each * received packet), 16 bytes for up to four VLAN tags, and * 4 bytes for the trailing FCS -- 36 bytes total. */ skb_size = pep->dev->mtu + 36; /* * Make sure that the skb size is a multiple of 8 bytes, as * the lower three bits of the receive descriptor's buffer * size field are ignored by the hardware. */ pep->skb_size = (skb_size + 7) & ~7; /* * If NET_SKB_PAD is smaller than a cache line, * netdev_alloc_skb() will cause skb->data to be misaligned * to a cache line boundary. If this is the case, include * some extra space to allow re-aligning the data area. 
*/ pep->skb_size += SKB_DMA_REALIGN; } static int set_port_config_ext(struct pxa168_eth_private *pep) { int skb_size; pxa168_eth_recalc_skb_size(pep); if (pep->skb_size <= 1518) skb_size = PCXR_MFL_1518; else if (pep->skb_size <= 1536) skb_size = PCXR_MFL_1536; else if (pep->skb_size <= 2048) skb_size = PCXR_MFL_2048; else skb_size = PCXR_MFL_64K; /* Extended Port Configuration */ wrl(pep, PORT_CONFIG_EXT, PCXR_AN_SPEED_DIS | /* Disable HW AN */ PCXR_AN_DUPLEX_DIS | PCXR_AN_FLOWCTL_DIS | PCXR_2BSM | /* Two byte prefix aligns IP hdr */ PCXR_DSCP_EN | /* Enable DSCP in IP */ skb_size | PCXR_FLP | /* do not force link pass */ PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */ return 0; } static void pxa168_eth_adjust_link(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); struct phy_device *phy = dev->phydev; u32 cfg, cfg_o = rdl(pep, PORT_CONFIG); u32 cfgext, cfgext_o = rdl(pep, PORT_CONFIG_EXT); cfg = cfg_o & ~PCR_DUPLEX_FULL; cfgext = cfgext_o & ~(PCXR_SPEED_100 | PCXR_FLOWCTL_DIS | PCXR_RMII_EN); if (phy->interface == PHY_INTERFACE_MODE_RMII) cfgext |= PCXR_RMII_EN; if (phy->speed == SPEED_100) cfgext |= PCXR_SPEED_100; if (phy->duplex) cfg |= PCR_DUPLEX_FULL; if (!phy->pause) cfgext |= PCXR_FLOWCTL_DIS; /* Bail out if there has nothing changed */ if (cfg == cfg_o && cfgext == cfgext_o) return; wrl(pep, PORT_CONFIG, cfg); wrl(pep, PORT_CONFIG_EXT, cfgext); phy_print_status(phy); } static int pxa168_init_phy(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); struct ethtool_link_ksettings cmd; struct phy_device *phy = NULL; int err; if (dev->phydev) return 0; phy = mdiobus_scan_c22(pep->smi_bus, pep->phy_addr); if (IS_ERR(phy)) return PTR_ERR(phy); err = phy_connect_direct(dev, phy, pxa168_eth_adjust_link, pep->phy_intf); if (err) return err; cmd.base.phy_address = pep->phy_addr; cmd.base.speed = pep->phy_speed; cmd.base.duplex = pep->phy_duplex; linkmode_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES); cmd.base.autoneg = AUTONEG_ENABLE; if (cmd.base.speed != 0) cmd.base.autoneg = AUTONEG_DISABLE; return phy_ethtool_set_link_ksettings(dev, &cmd); } static int pxa168_init_hw(struct pxa168_eth_private *pep) { int err = 0; /* Disable interrupts */ wrl(pep, INT_MASK, 0); wrl(pep, INT_CAUSE, 0); /* Write to ICR to clear interrupts. */ wrl(pep, INT_W_CLEAR, 0); /* Abort any transmit and receive operations and put DMA * in idle state. 
*/ abort_dma(pep); /* Initialize address hash table */ err = init_hash_table(pep); if (err) return err; /* SDMA configuration */ wrl(pep, SDMA_CONFIG, SDCR_BSZ8 | /* Burst size = 32 bytes */ SDCR_RIFB | /* Rx interrupt on frame */ SDCR_BLMT | /* Little endian transmit */ SDCR_BLMR | /* Little endian receive */ SDCR_RC_MAX_RETRANS); /* Max retransmit count */ /* Port Configuration */ wrl(pep, PORT_CONFIG, PCR_HS); /* Hash size is 1/2kb */ set_port_config_ext(pep); return err; } static int rxq_init(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); struct rx_desc *p_rx_desc; int size = 0, i = 0; int rx_desc_num = pep->rx_ring_size; /* Allocate RX skb rings */ pep->rx_skb = kcalloc(rx_desc_num, sizeof(*pep->rx_skb), GFP_KERNEL); if (!pep->rx_skb) return -ENOMEM; /* Allocate RX ring */ pep->rx_desc_count = 0; size = pep->rx_ring_size * sizeof(struct rx_desc); pep->rx_desc_area_size = size; pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, &pep->rx_desc_dma, GFP_KERNEL); if (!pep->p_rx_desc_area) goto out; /* initialize the next_desc_ptr links in the Rx descriptors ring */ p_rx_desc = pep->p_rx_desc_area; for (i = 0; i < rx_desc_num; i++) { p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma + ((i + 1) % rx_desc_num) * sizeof(struct rx_desc); } /* Save Rx desc pointer to driver struct. */ pep->rx_curr_desc_q = 0; pep->rx_used_desc_q = 0; pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc); return 0; out: kfree(pep->rx_skb); return -ENOMEM; } static void rxq_deinit(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); int curr; /* Free preallocated skb's on RX rings */ for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) { if (pep->rx_skb[curr]) { dev_kfree_skb(pep->rx_skb[curr]); pep->rx_desc_count--; } } if (pep->rx_desc_count) netdev_err(dev, "Error in freeing Rx Ring. 
%d skb's still\n", pep->rx_desc_count); /* Free RX ring */ if (pep->p_rx_desc_area) dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size, pep->p_rx_desc_area, pep->rx_desc_dma); kfree(pep->rx_skb); } static int txq_init(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); struct tx_desc *p_tx_desc; int size = 0, i = 0; int tx_desc_num = pep->tx_ring_size; pep->tx_skb = kcalloc(tx_desc_num, sizeof(*pep->tx_skb), GFP_KERNEL); if (!pep->tx_skb) return -ENOMEM; /* Allocate TX ring */ pep->tx_desc_count = 0; size = pep->tx_ring_size * sizeof(struct tx_desc); pep->tx_desc_area_size = size; pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, &pep->tx_desc_dma, GFP_KERNEL); if (!pep->p_tx_desc_area) goto out; /* Initialize the next_desc_ptr links in the Tx descriptors ring */ p_tx_desc = pep->p_tx_desc_area; for (i = 0; i < tx_desc_num; i++) { p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma + ((i + 1) % tx_desc_num) * sizeof(struct tx_desc); } pep->tx_curr_desc_q = 0; pep->tx_used_desc_q = 0; pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc); return 0; out: kfree(pep->tx_skb); return -ENOMEM; } static void txq_deinit(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); /* Free outstanding skb's on TX ring */ txq_reclaim(dev, 1); BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q); /* Free TX ring */ if (pep->p_tx_desc_area) dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size, pep->p_tx_desc_area, pep->tx_desc_dma); kfree(pep->tx_skb); } static int pxa168_eth_open(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); int err; err = pxa168_init_phy(dev); if (err) return err; err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev); if (err) { dev_err(&dev->dev, "can't assign irq\n"); return -EAGAIN; } pep->rx_resource_err = 0; err = rxq_init(dev); if (err != 0) goto out_free_irq; err = txq_init(dev); if (err != 0) goto out_free_rx_skb; pep->rx_used_desc_q = 0; pep->rx_curr_desc_q = 0; /* Fill RX ring with skb's */ rxq_refill(dev); pep->rx_used_desc_q = 0; pep->rx_curr_desc_q = 0; netif_carrier_off(dev); napi_enable(&pep->napi); eth_port_start(dev); return 0; out_free_rx_skb: rxq_deinit(dev); out_free_irq: free_irq(dev->irq, dev); return err; } static int pxa168_eth_stop(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); eth_port_reset(dev); /* Disable interrupts */ wrl(pep, INT_MASK, 0); wrl(pep, INT_CAUSE, 0); /* Write to ICR to clear interrupts. */ wrl(pep, INT_W_CLEAR, 0); napi_disable(&pep->napi); del_timer_sync(&pep->timeout); netif_carrier_off(dev); free_irq(dev->irq, dev); rxq_deinit(dev); txq_deinit(dev); return 0; } static int pxa168_eth_change_mtu(struct net_device *dev, int mtu) { struct pxa168_eth_private *pep = netdev_priv(dev); dev->mtu = mtu; set_port_config_ext(pep); if (!netif_running(dev)) return 0; /* * Stop and then re-open the interface. This will allocate RX * skbs of the new MTU. * There is a possible danger that the open will not succeed, * due to memory being full. 
*/ pxa168_eth_stop(dev); if (pxa168_eth_open(dev)) { dev_err(&dev->dev, "fatal error on re-opening device after MTU change\n"); } return 0; } static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep) { int tx_desc_curr; tx_desc_curr = pep->tx_curr_desc_q; pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size; BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q); pep->tx_desc_count++; return tx_desc_curr; } static int pxa168_rx_poll(struct napi_struct *napi, int budget) { struct pxa168_eth_private *pep = container_of(napi, struct pxa168_eth_private, napi); struct net_device *dev = pep->dev; int work_done = 0; /* * We call txq_reclaim every time since in NAPI interupts are disabled * and due to this we miss the TX_DONE interrupt,which is not updated in * interrupt status register. */ txq_reclaim(dev, 0); if (netif_queue_stopped(dev) && pep->tx_ring_size - pep->tx_desc_count > 1) { netif_wake_queue(dev); } work_done = rxq_process(dev, budget); if (work_done < budget) { napi_complete_done(napi, work_done); wrl(pep, INT_MASK, ALL_INTS); } return work_done; } static netdev_tx_t pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct tx_desc *desc; int tx_index; int length; tx_index = eth_alloc_tx_desc_index(pep); desc = &pep->p_tx_desc_area[tx_index]; length = skb->len; pep->tx_skb[tx_index] = skb; desc->byte_cnt = length; desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data, length, DMA_TO_DEVICE); skb_tx_timestamp(skb); dma_wmb(); desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC | TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT; wmb(); wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD); stats->tx_bytes += length; stats->tx_packets++; netif_trans_update(dev); if (pep->tx_ring_size - pep->tx_desc_count <= 1) { /* We handled the current skb, but now we are out of space.*/ netif_stop_queue(dev); } return NETDEV_TX_OK; } static int smi_wait_ready(struct pxa168_eth_private *pep) { int i = 0; /* wait for the SMI register to become available */ for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) { if (i == PHY_WAIT_ITERATIONS) return -ETIMEDOUT; msleep(10); } return 0; } static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum) { struct pxa168_eth_private *pep = bus->priv; int i = 0; int val; if (smi_wait_ready(pep)) { netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n"); return -ETIMEDOUT; } wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R); /* now wait for the data to be valid */ for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) { if (i == PHY_WAIT_ITERATIONS) { netdev_warn(pep->dev, "pxa168_eth: SMI bus read not valid\n"); return -ENODEV; } msleep(10); } return val & 0xffff; } static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value) { struct pxa168_eth_private *pep = bus->priv; if (smi_wait_ready(pep)) { netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n"); return -ETIMEDOUT; } wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_W | (value & 0xffff)); if (smi_wait_ready(pep)) { netdev_err(pep->dev, "pxa168_eth: SMI bus busy timeout\n"); return -ETIMEDOUT; } return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER static void pxa168_eth_netpoll(struct net_device *dev) { disable_irq(dev->irq); pxa168_eth_int_handler(dev->irq, dev); enable_irq(dev->irq); } #endif static void pxa168_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strscpy(info->driver, DRIVER_NAME, 
sizeof(info->driver)); strscpy(info->version, DRIVER_VERSION, sizeof(info->version)); strscpy(info->fw_version, "N/A", sizeof(info->fw_version)); strscpy(info->bus_info, "N/A", sizeof(info->bus_info)); } static const struct ethtool_ops pxa168_ethtool_ops = { .get_drvinfo = pxa168_get_drvinfo, .nway_reset = phy_ethtool_nway_reset, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct net_device_ops pxa168_eth_netdev_ops = { .ndo_open = pxa168_eth_open, .ndo_stop = pxa168_eth_stop, .ndo_start_xmit = pxa168_eth_start_xmit, .ndo_set_rx_mode = pxa168_eth_set_rx_mode, .ndo_set_mac_address = pxa168_eth_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_eth_ioctl = phy_do_ioctl, .ndo_change_mtu = pxa168_eth_change_mtu, .ndo_tx_timeout = pxa168_eth_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = pxa168_eth_netpoll, #endif }; static int pxa168_eth_probe(struct platform_device *pdev) { struct pxa168_eth_private *pep = NULL; struct net_device *dev = NULL; struct clk *clk; struct device_node *np; int err; printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n"); clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { dev_err(&pdev->dev, "Fast Ethernet failed to get clock\n"); return -ENODEV; } clk_prepare_enable(clk); dev = alloc_etherdev(sizeof(struct pxa168_eth_private)); if (!dev) { err = -ENOMEM; goto err_clk; } platform_set_drvdata(pdev, dev); pep = netdev_priv(dev); pep->dev = dev; pep->clk = clk; pep->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pep->base)) { err = PTR_ERR(pep->base); goto err_netdev; } err = platform_get_irq(pdev, 0); if (err == -EPROBE_DEFER) goto err_netdev; BUG_ON(dev->irq < 0); dev->irq = err; dev->netdev_ops = &pxa168_eth_netdev_ops; dev->watchdog_timeo = 2 * HZ; dev->base_addr = 0; dev->ethtool_ops = &pxa168_ethtool_ops; /* MTU range: 68 - 9500 */ dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = 9500; INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task); err = of_get_ethdev_address(pdev->dev.of_node, dev); if (err) { u8 addr[ETH_ALEN]; /* try reading the mac address, if set by the bootloader */ pxa168_eth_get_mac_address(dev, addr); if (is_valid_ether_addr(addr)) { eth_hw_addr_set(dev, addr); } else { dev_info(&pdev->dev, "Using random mac address\n"); eth_hw_addr_random(dev); } } pep->rx_ring_size = NUM_RX_DESCS; pep->tx_ring_size = NUM_TX_DESCS; pep->pd = dev_get_platdata(&pdev->dev); if (pep->pd) { if (pep->pd->rx_queue_size) pep->rx_ring_size = pep->pd->rx_queue_size; if (pep->pd->tx_queue_size) pep->tx_ring_size = pep->pd->tx_queue_size; pep->port_num = pep->pd->port_number; pep->phy_addr = pep->pd->phy_addr; pep->phy_speed = pep->pd->speed; pep->phy_duplex = pep->pd->duplex; pep->phy_intf = pep->pd->intf; if (pep->pd->init) pep->pd->init(); } else if (pdev->dev.of_node) { of_property_read_u32(pdev->dev.of_node, "port-id", &pep->port_num); np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); if (!np) { dev_err(&pdev->dev, "missing phy-handle\n"); err = -EINVAL; goto err_netdev; } of_property_read_u32(np, "reg", &pep->phy_addr); of_node_put(np); err = of_get_phy_mode(pdev->dev.of_node, &pep->phy_intf); if (err && err != -ENODEV) goto err_netdev; } /* Hardware supports only 3 ports */ BUG_ON(pep->port_num > 2); netif_napi_add_weight(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size); memset(&pep->timeout, 0, sizeof(struct timer_list)); timer_setup(&pep->timeout, 
rxq_refill_timer_wrapper, 0); pep->smi_bus = mdiobus_alloc(); if (!pep->smi_bus) { err = -ENOMEM; goto err_netdev; } pep->smi_bus->priv = pep; pep->smi_bus->name = "pxa168_eth smi"; pep->smi_bus->read = pxa168_smi_read; pep->smi_bus->write = pxa168_smi_write; snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id); pep->smi_bus->parent = &pdev->dev; pep->smi_bus->phy_mask = 0xffffffff; err = mdiobus_register(pep->smi_bus); if (err) goto err_free_mdio; pep->pdev = pdev; SET_NETDEV_DEV(dev, &pdev->dev); pxa168_init_hw(pep); err = register_netdev(dev); if (err) goto err_mdiobus; return 0; err_mdiobus: mdiobus_unregister(pep->smi_bus); err_free_mdio: mdiobus_free(pep->smi_bus); err_netdev: free_netdev(dev); err_clk: clk_disable_unprepare(clk); return err; } static int pxa168_eth_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct pxa168_eth_private *pep = netdev_priv(dev); cancel_work_sync(&pep->tx_timeout_task); if (pep->htpr) { dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE, pep->htpr, pep->htpr_dma); pep->htpr = NULL; } if (dev->phydev) phy_disconnect(dev->phydev); clk_disable_unprepare(pep->clk); mdiobus_unregister(pep->smi_bus); mdiobus_free(pep->smi_bus); unregister_netdev(dev); free_netdev(dev); return 0; } static void pxa168_eth_shutdown(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); eth_port_reset(dev); } #ifdef CONFIG_PM static int pxa168_eth_resume(struct platform_device *pdev) { return -ENOSYS; } static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state) { return -ENOSYS; } #else #define pxa168_eth_resume NULL #define pxa168_eth_suspend NULL #endif static const struct of_device_id pxa168_eth_of_match[] = { { .compatible = "marvell,pxa168-eth" }, { }, }; MODULE_DEVICE_TABLE(of, pxa168_eth_of_match); static struct platform_driver pxa168_eth_driver = { .probe = pxa168_eth_probe, .remove = pxa168_eth_remove, .shutdown = pxa168_eth_shutdown, .resume = pxa168_eth_resume, .suspend = pxa168_eth_suspend, .driver = { .name = DRIVER_NAME, .of_match_table = pxa168_eth_of_match, }, }; module_platform_driver(pxa168_eth_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168"); MODULE_ALIAS("platform:pxa168_eth");
linux-master
drivers/net/ethernet/marvell/pxa168_eth.c
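The receive-buffer sizing in pxa168_eth_recalc_skb_size above adds 36 bytes of overhead to the MTU (2 bytes of hardware-prepended padding, 14 bytes of Ethernet header, 16 bytes for up to four VLAN tags, 4 bytes of FCS) and rounds up to a multiple of 8 because the low three bits of the RX descriptor's buffer-size field are ignored. The stand-alone sketch below mirrors only that arithmetic in plain user-space C; the function name and the sample MTUs are illustrative and it deliberately omits the SKB_DMA_REALIGN slack the driver adds afterwards.

#include <stdio.h>

/* Mirror of the driver's sizing rule: MTU + 36 bytes of overhead,
 * rounded up to the next multiple of 8.
 */
static int rx_buf_size(int mtu)
{
	int skb_size = mtu + 36;

	return (skb_size + 7) & ~7;
}

int main(void)
{
	int mtus[] = { 68, 1500, 9500 };	/* sample MTUs only */

	for (unsigned int i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++)
		printf("mtu %4d -> rx buffer %4d bytes\n",
		       mtus[i], rx_buf_size(mtus[i]));
	return 0;
}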
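rxq_init and txq_init above chain each descriptor to its successor by storing base_dma + ((i + 1) % ring_size) * sizeof(desc), so the last entry wraps back to the first. The sketch below just computes that chain for a small ring; the eight-entry ring, the 16-byte descriptor and the base address are placeholders, only the wrapping formula comes from the driver.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8			/* placeholder ring size */
#define DESC_SIZE 16			/* placeholder descriptor size */

int main(void)
{
	uint64_t base_dma = 0x10000000;	/* pretend DMA address of the ring */

	for (unsigned int i = 0; i < RING_SIZE; i++) {
		uint64_t cur = base_dma + i * (uint64_t)DESC_SIZE;
		uint64_t next = base_dma +
			((i + 1) % RING_SIZE) * (uint64_t)DESC_SIZE;

		/* last descriptor points back at the first */
		printf("desc %u @ 0x%08llx -> next 0x%08llx\n", i,
		       (unsigned long long)cur, (unsigned long long)next);
	}
	return 0;
}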
/* * Driver for the MDIO interface of Marvell network interfaces. * * Since the MDIO interface of Marvell network interfaces is shared * between all network interfaces, having a single driver allows to * handle concurrent accesses properly (you may have four Ethernet * ports, but they in fact share the same SMI interface to access * the MDIO bus). This driver is currently used by the mvneta and * mv643xx_eth drivers. * * Copyright (C) 2012 Marvell * * Thomas Petazzoni <[email protected]> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/acpi.h> #include <linux/acpi_mdio.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/of_mdio.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/wait.h> #define MVMDIO_SMI_DATA_SHIFT 0 #define MVMDIO_SMI_PHY_ADDR_SHIFT 16 #define MVMDIO_SMI_PHY_REG_SHIFT 21 #define MVMDIO_SMI_READ_OPERATION BIT(26) #define MVMDIO_SMI_WRITE_OPERATION 0 #define MVMDIO_SMI_READ_VALID BIT(27) #define MVMDIO_SMI_BUSY BIT(28) #define MVMDIO_ERR_INT_CAUSE 0x007C #define MVMDIO_ERR_INT_SMI_DONE 0x00000010 #define MVMDIO_ERR_INT_MASK 0x0080 #define MVMDIO_XSMI_MGNT_REG 0x0 #define MVMDIO_XSMI_PHYADDR_SHIFT 16 #define MVMDIO_XSMI_DEVADDR_SHIFT 21 #define MVMDIO_XSMI_WRITE_OPERATION (0x5 << 26) #define MVMDIO_XSMI_READ_OPERATION (0x7 << 26) #define MVMDIO_XSMI_READ_VALID BIT(29) #define MVMDIO_XSMI_BUSY BIT(30) #define MVMDIO_XSMI_ADDR_REG 0x8 /* * SMI Timeout measurements: * - Kirkwood 88F6281 (Globalscale Dreamplug): 45us to 95us (Interrupt) * - Armada 370 (Globalscale Mirabox): 41us to 43us (Polled) */ #define MVMDIO_SMI_TIMEOUT 1000 /* 1000us = 1ms */ #define MVMDIO_SMI_POLL_INTERVAL_MIN 45 #define MVMDIO_SMI_POLL_INTERVAL_MAX 55 #define MVMDIO_XSMI_POLL_INTERVAL_MIN 150 #define MVMDIO_XSMI_POLL_INTERVAL_MAX 160 struct orion_mdio_dev { void __iomem *regs; struct clk *clk[4]; /* * If we have access to the error interrupt pin (which is * somewhat misnamed as it not only reflects internal errors * but also reflects SMI completion), use that to wait for * SMI access completion instead of polling the SMI busy bit. */ int err_interrupt; wait_queue_head_t smi_busy_wait; }; enum orion_mdio_bus_type { BUS_TYPE_SMI, BUS_TYPE_XSMI }; struct orion_mdio_ops { int (*is_done)(struct orion_mdio_dev *); unsigned int poll_interval_min; unsigned int poll_interval_max; }; /* Wait for the SMI unit to be ready for another operation */ static int orion_mdio_wait_ready(const struct orion_mdio_ops *ops, struct mii_bus *bus) { struct orion_mdio_dev *dev = bus->priv; unsigned long timeout = usecs_to_jiffies(MVMDIO_SMI_TIMEOUT); unsigned long end = jiffies + timeout; int timedout = 0; while (1) { if (ops->is_done(dev)) return 0; else if (timedout) break; if (dev->err_interrupt <= 0) { usleep_range(ops->poll_interval_min, ops->poll_interval_max); if (time_is_before_jiffies(end)) ++timedout; } else { /* wait_event_timeout does not guarantee a delay of at * least one whole jiffie, so timeout must be no less * than two. 
*/ if (timeout < 2) timeout = 2; wait_event_timeout(dev->smi_busy_wait, ops->is_done(dev), timeout); ++timedout; } } dev_err(bus->parent, "Timeout: SMI busy for too long\n"); return -ETIMEDOUT; } static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev) { return !(readl(dev->regs) & MVMDIO_SMI_BUSY); } static const struct orion_mdio_ops orion_mdio_smi_ops = { .is_done = orion_mdio_smi_is_done, .poll_interval_min = MVMDIO_SMI_POLL_INTERVAL_MIN, .poll_interval_max = MVMDIO_SMI_POLL_INTERVAL_MAX, }; static int orion_mdio_smi_read(struct mii_bus *bus, int mii_id, int regnum) { struct orion_mdio_dev *dev = bus->priv; u32 val; int ret; ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus); if (ret < 0) return ret; writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) | (regnum << MVMDIO_SMI_PHY_REG_SHIFT) | MVMDIO_SMI_READ_OPERATION), dev->regs); ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus); if (ret < 0) return ret; val = readl(dev->regs); if (!(val & MVMDIO_SMI_READ_VALID)) { dev_err(bus->parent, "SMI bus read not valid\n"); return -ENODEV; } return val & GENMASK(15, 0); } static int orion_mdio_smi_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) { struct orion_mdio_dev *dev = bus->priv; int ret; ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus); if (ret < 0) return ret; writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) | (regnum << MVMDIO_SMI_PHY_REG_SHIFT) | MVMDIO_SMI_WRITE_OPERATION | (value << MVMDIO_SMI_DATA_SHIFT)), dev->regs); return 0; } static int orion_mdio_xsmi_is_done(struct orion_mdio_dev *dev) { return !(readl(dev->regs + MVMDIO_XSMI_MGNT_REG) & MVMDIO_XSMI_BUSY); } static const struct orion_mdio_ops orion_mdio_xsmi_ops = { .is_done = orion_mdio_xsmi_is_done, .poll_interval_min = MVMDIO_XSMI_POLL_INTERVAL_MIN, .poll_interval_max = MVMDIO_XSMI_POLL_INTERVAL_MAX, }; static int orion_mdio_xsmi_read_c45(struct mii_bus *bus, int mii_id, int dev_addr, int regnum) { struct orion_mdio_dev *dev = bus->priv; int ret; ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus); if (ret < 0) return ret; writel(regnum, dev->regs + MVMDIO_XSMI_ADDR_REG); writel((mii_id << MVMDIO_XSMI_PHYADDR_SHIFT) | (dev_addr << MVMDIO_XSMI_DEVADDR_SHIFT) | MVMDIO_XSMI_READ_OPERATION, dev->regs + MVMDIO_XSMI_MGNT_REG); ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus); if (ret < 0) return ret; if (!(readl(dev->regs + MVMDIO_XSMI_MGNT_REG) & MVMDIO_XSMI_READ_VALID)) { dev_err(bus->parent, "XSMI bus read not valid\n"); return -ENODEV; } return readl(dev->regs + MVMDIO_XSMI_MGNT_REG) & GENMASK(15, 0); } static int orion_mdio_xsmi_write_c45(struct mii_bus *bus, int mii_id, int dev_addr, int regnum, u16 value) { struct orion_mdio_dev *dev = bus->priv; int ret; ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus); if (ret < 0) return ret; writel(regnum, dev->regs + MVMDIO_XSMI_ADDR_REG); writel((mii_id << MVMDIO_XSMI_PHYADDR_SHIFT) | (dev_addr << MVMDIO_XSMI_DEVADDR_SHIFT) | MVMDIO_XSMI_WRITE_OPERATION | value, dev->regs + MVMDIO_XSMI_MGNT_REG); return 0; } static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id) { struct orion_mdio_dev *dev = dev_id; if (readl(dev->regs + MVMDIO_ERR_INT_CAUSE) & MVMDIO_ERR_INT_SMI_DONE) { writel(~MVMDIO_ERR_INT_SMI_DONE, dev->regs + MVMDIO_ERR_INT_CAUSE); wake_up(&dev->smi_busy_wait); return IRQ_HANDLED; } return IRQ_NONE; } static int orion_mdio_probe(struct platform_device *pdev) { enum orion_mdio_bus_type type; struct resource *r; struct mii_bus *bus; struct orion_mdio_dev *dev; int i, ret; type = (uintptr_t)device_get_match_data(&pdev->dev); r = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { dev_err(&pdev->dev, "No SMI register address given\n"); return -ENODEV; } bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(struct orion_mdio_dev)); if (!bus) return -ENOMEM; switch (type) { case BUS_TYPE_SMI: bus->read = orion_mdio_smi_read; bus->write = orion_mdio_smi_write; break; case BUS_TYPE_XSMI: bus->read_c45 = orion_mdio_xsmi_read_c45; bus->write_c45 = orion_mdio_xsmi_write_c45; break; } bus->name = "orion_mdio_bus"; snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev)); bus->parent = &pdev->dev; dev = bus->priv; dev->regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (!dev->regs) { dev_err(&pdev->dev, "Unable to remap SMI register\n"); return -ENODEV; } init_waitqueue_head(&dev->smi_busy_wait); if (pdev->dev.of_node) { for (i = 0; i < ARRAY_SIZE(dev->clk); i++) { dev->clk[i] = of_clk_get(pdev->dev.of_node, i); if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto out_clk; } if (IS_ERR(dev->clk[i])) break; clk_prepare_enable(dev->clk[i]); } if (!IS_ERR(of_clk_get(pdev->dev.of_node, ARRAY_SIZE(dev->clk)))) dev_warn(&pdev->dev, "unsupported number of clocks, limiting to the first " __stringify(ARRAY_SIZE(dev->clk)) "\n"); } else { dev->clk[0] = clk_get(&pdev->dev, NULL); if (PTR_ERR(dev->clk[0]) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto out_clk; } if (!IS_ERR(dev->clk[0])) clk_prepare_enable(dev->clk[0]); } dev->err_interrupt = platform_get_irq_optional(pdev, 0); if (dev->err_interrupt > 0 && resource_size(r) < MVMDIO_ERR_INT_MASK + 4) { dev_err(&pdev->dev, "disabling interrupt, resource size is too small\n"); dev->err_interrupt = 0; } if (dev->err_interrupt > 0) { ret = devm_request_irq(&pdev->dev, dev->err_interrupt, orion_mdio_err_irq, IRQF_SHARED, pdev->name, dev); if (ret) goto out_mdio; writel(MVMDIO_ERR_INT_SMI_DONE, dev->regs + MVMDIO_ERR_INT_MASK); } else if (dev->err_interrupt == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto out_mdio; } /* For the platforms not supporting DT/ACPI fall-back * to mdiobus_register via of_mdiobus_register. 
*/ if (is_acpi_node(pdev->dev.fwnode)) ret = acpi_mdiobus_register(bus, pdev->dev.fwnode); else ret = of_mdiobus_register(bus, pdev->dev.of_node); if (ret < 0) { dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret); goto out_mdio; } platform_set_drvdata(pdev, bus); return 0; out_mdio: if (dev->err_interrupt > 0) writel(0, dev->regs + MVMDIO_ERR_INT_MASK); out_clk: for (i = 0; i < ARRAY_SIZE(dev->clk); i++) { if (IS_ERR(dev->clk[i])) break; clk_disable_unprepare(dev->clk[i]); clk_put(dev->clk[i]); } return ret; } static int orion_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); struct orion_mdio_dev *dev = bus->priv; int i; if (dev->err_interrupt > 0) writel(0, dev->regs + MVMDIO_ERR_INT_MASK); mdiobus_unregister(bus); for (i = 0; i < ARRAY_SIZE(dev->clk); i++) { if (IS_ERR(dev->clk[i])) break; clk_disable_unprepare(dev->clk[i]); clk_put(dev->clk[i]); } return 0; } static const struct of_device_id orion_mdio_match[] = { { .compatible = "marvell,orion-mdio", .data = (void *)BUS_TYPE_SMI }, { .compatible = "marvell,xmdio", .data = (void *)BUS_TYPE_XSMI }, { } }; MODULE_DEVICE_TABLE(of, orion_mdio_match); #ifdef CONFIG_ACPI static const struct acpi_device_id orion_mdio_acpi_match[] = { { "MRVL0100", BUS_TYPE_SMI }, { "MRVL0101", BUS_TYPE_XSMI }, { }, }; MODULE_DEVICE_TABLE(acpi, orion_mdio_acpi_match); #endif static struct platform_driver orion_mdio_driver = { .probe = orion_mdio_probe, .remove = orion_mdio_remove, .driver = { .name = "orion-mdio", .of_match_table = orion_mdio_match, .acpi_match_table = ACPI_PTR(orion_mdio_acpi_match), }, }; module_platform_driver(orion_mdio_driver); MODULE_DESCRIPTION("Marvell MDIO interface driver"); MODULE_AUTHOR("Thomas Petazzoni <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:orion-mdio");
linux-master
drivers/net/ethernet/marvell/mvmdio.c
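orion_mdio_wait_ready above bounds a completion predicate with a 1 ms deadline, sleeping 45-55 us between polls when no error interrupt is available. The sketch below reproduces only that polled branch in ordinary user-space C so the deadline bookkeeping is easy to follow; the timeout and interval figures come from the driver's MVMDIO_SMI_* constants, while the fake busy flag and the function names are made up for illustration.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define SMI_TIMEOUT_US		1000	/* 1 ms, as in MVMDIO_SMI_TIMEOUT */
#define SMI_POLL_INTERVAL_US	50	/* between the 45/55 us driver bounds */

/* Stand-in for "SMI busy bit cleared"; a real driver reads a register. */
static int polls_left = 5;

static bool smi_is_done(void)
{
	return --polls_left <= 0;
}

/* Poll the completion predicate until it succeeds or the deadline passes. */
static int wait_ready(bool (*is_done)(void))
{
	struct timespec now, end;

	clock_gettime(CLOCK_MONOTONIC, &end);
	end.tv_nsec += SMI_TIMEOUT_US * 1000L;
	if (end.tv_nsec >= 1000000000L) {
		end.tv_sec++;
		end.tv_nsec -= 1000000000L;
	}

	for (;;) {
		if (is_done())
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec > end.tv_sec ||
		    (now.tv_sec == end.tv_sec && now.tv_nsec >= end.tv_nsec))
			return -1;	/* timed out, like -ETIMEDOUT */
		usleep(SMI_POLL_INTERVAL_US);
	}
}

int main(void)
{
	printf("wait_ready: %s\n", wait_ready(smi_is_done) ? "timeout" : "ok");
	return 0;
}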
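The SMI command word written by orion_mdio_smi_read packs the PHY address at bit 16, the register number at bit 21 and the opcode/status flags in bits 26-28, as the MVMDIO_SMI_* definitions above spell out. The short sketch below only assembles and decodes such a word so the field layout is visible; the helper name and the example PHY/register values are illustrative.

#include <stdint.h>
#include <stdio.h>

#define SMI_PHY_ADDR_SHIFT	16
#define SMI_PHY_REG_SHIFT	21
#define SMI_READ_OP		(1u << 26)
#define SMI_READ_VALID		(1u << 27)
#define SMI_BUSY		(1u << 28)

/* Assemble the command word a read would write to the SMI register. */
static uint32_t smi_read_cmd(unsigned int phy, unsigned int reg)
{
	return (phy << SMI_PHY_ADDR_SHIFT) |
	       (reg << SMI_PHY_REG_SHIFT) |
	       SMI_READ_OP;
}

int main(void)
{
	uint32_t cmd = smi_read_cmd(3, 1);	/* PHY 3, register 1 (BMSR) */

	printf("command word: 0x%08x\n", (unsigned int)cmd);
	printf("phy=%u reg=%u read=%u\n",
	       (unsigned int)((cmd >> SMI_PHY_ADDR_SHIFT) & 0x1f),
	       (unsigned int)((cmd >> SMI_PHY_REG_SHIFT) & 0x1f),
	       !!(cmd & SMI_READ_OP));
	return 0;
}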
// SPDX-License-Identifier: GPL-2.0 /* Marvell Octeon EP (EndPoint) Ethernet Driver * * Copyright (C) 2020 Marvell. * */ #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include "octep_config.h" #include "octep_main.h" #include "octep_ctrl_net.h" static const char octep_gstrings_global_stats[][ETH_GSTRING_LEN] = { "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_alloc_errors", "tx_busy_errors", "rx_dropped", "tx_dropped", "tx_hw_pkts", "tx_hw_octs", "tx_hw_bcast", "tx_hw_mcast", "tx_hw_underflow", "tx_hw_control", "tx_less_than_64", "tx_equal_64", "tx_equal_65_to_127", "tx_equal_128_to_255", "tx_equal_256_to_511", "tx_equal_512_to_1023", "tx_equal_1024_to_1518", "tx_greater_than_1518", "rx_hw_pkts", "rx_hw_bytes", "rx_hw_bcast", "rx_hw_mcast", "rx_pause_pkts", "rx_pause_bytes", "rx_dropped_pkts_fifo_full", "rx_dropped_bytes_fifo_full", "rx_err_pkts", }; #define OCTEP_GLOBAL_STATS_CNT (sizeof(octep_gstrings_global_stats) / ETH_GSTRING_LEN) static const char octep_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = { "tx_packets_posted[Q-%u]", "tx_packets_completed[Q-%u]", "tx_bytes[Q-%u]", "tx_busy[Q-%u]", }; #define OCTEP_TX_Q_STATS_CNT (sizeof(octep_gstrings_tx_q_stats) / ETH_GSTRING_LEN) static const char octep_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = { "rx_packets[Q-%u]", "rx_bytes[Q-%u]", "rx_alloc_errors[Q-%u]", }; #define OCTEP_RX_Q_STATS_CNT (sizeof(octep_gstrings_rx_q_stats) / ETH_GSTRING_LEN) static void octep_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { struct octep_device *oct = netdev_priv(netdev); strscpy(info->driver, OCTEP_DRV_NAME, sizeof(info->driver)); strscpy(info->bus_info, pci_name(oct->pdev), sizeof(info->bus_info)); } static void octep_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct octep_device *oct = netdev_priv(netdev); u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); char *strings = (char *)data; int i, j; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < OCTEP_GLOBAL_STATS_CNT; i++) { snprintf(strings, ETH_GSTRING_LEN, octep_gstrings_global_stats[i]); strings += ETH_GSTRING_LEN; } for (i = 0; i < num_queues; i++) { for (j = 0; j < OCTEP_TX_Q_STATS_CNT; j++) { snprintf(strings, ETH_GSTRING_LEN, octep_gstrings_tx_q_stats[j], i); strings += ETH_GSTRING_LEN; } } for (i = 0; i < num_queues; i++) { for (j = 0; j < OCTEP_RX_Q_STATS_CNT; j++) { snprintf(strings, ETH_GSTRING_LEN, octep_gstrings_rx_q_stats[j], i); strings += ETH_GSTRING_LEN; } } break; default: break; } } static int octep_get_sset_count(struct net_device *netdev, int sset) { struct octep_device *oct = netdev_priv(netdev); u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); switch (sset) { case ETH_SS_STATS: return OCTEP_GLOBAL_STATS_CNT + (num_queues * (OCTEP_TX_Q_STATS_CNT + OCTEP_RX_Q_STATS_CNT)); break; default: return -EOPNOTSUPP; } } static void octep_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct octep_device *oct = netdev_priv(netdev); struct octep_iface_tx_stats *iface_tx_stats; struct octep_iface_rx_stats *iface_rx_stats; u64 rx_packets, rx_bytes; u64 tx_packets, tx_bytes; u64 rx_alloc_errors, tx_busy_errors; int q, i; rx_packets = 0; rx_bytes = 0; tx_packets = 0; tx_bytes = 0; rx_alloc_errors = 0; tx_busy_errors = 0; tx_packets = 0; tx_bytes = 0; rx_packets = 0; rx_bytes = 0; iface_tx_stats = &oct->iface_tx_stats; iface_rx_stats = &oct->iface_rx_stats; octep_ctrl_net_get_if_stats(oct, OCTEP_CTRL_NET_INVALID_VFID, iface_rx_stats, iface_tx_stats); for 
(q = 0; q < oct->num_oqs; q++) { struct octep_iq *iq = oct->iq[q]; struct octep_oq *oq = oct->oq[q]; tx_packets += iq->stats.instr_completed; tx_bytes += iq->stats.bytes_sent; tx_busy_errors += iq->stats.tx_busy; rx_packets += oq->stats.packets; rx_bytes += oq->stats.bytes; rx_alloc_errors += oq->stats.alloc_failures; } i = 0; data[i++] = rx_packets; data[i++] = tx_packets; data[i++] = rx_bytes; data[i++] = tx_bytes; data[i++] = rx_alloc_errors; data[i++] = tx_busy_errors; data[i++] = iface_rx_stats->dropped_pkts_fifo_full + iface_rx_stats->err_pkts; data[i++] = iface_tx_stats->xscol + iface_tx_stats->xsdef; data[i++] = iface_tx_stats->pkts; data[i++] = iface_tx_stats->octs; data[i++] = iface_tx_stats->bcst; data[i++] = iface_tx_stats->mcst; data[i++] = iface_tx_stats->undflw; data[i++] = iface_tx_stats->ctl; data[i++] = iface_tx_stats->hist_lt64; data[i++] = iface_tx_stats->hist_eq64; data[i++] = iface_tx_stats->hist_65to127; data[i++] = iface_tx_stats->hist_128to255; data[i++] = iface_tx_stats->hist_256to511; data[i++] = iface_tx_stats->hist_512to1023; data[i++] = iface_tx_stats->hist_1024to1518; data[i++] = iface_tx_stats->hist_gt1518; data[i++] = iface_rx_stats->pkts; data[i++] = iface_rx_stats->octets; data[i++] = iface_rx_stats->mcast_pkts; data[i++] = iface_rx_stats->bcast_pkts; data[i++] = iface_rx_stats->pause_pkts; data[i++] = iface_rx_stats->pause_octets; data[i++] = iface_rx_stats->dropped_pkts_fifo_full; data[i++] = iface_rx_stats->dropped_octets_fifo_full; data[i++] = iface_rx_stats->err_pkts; /* Per Tx Queue stats */ for (q = 0; q < oct->num_iqs; q++) { struct octep_iq *iq = oct->iq[q]; data[i++] = iq->stats.instr_posted; data[i++] = iq->stats.instr_completed; data[i++] = iq->stats.bytes_sent; data[i++] = iq->stats.tx_busy; } /* Per Rx Queue stats */ for (q = 0; q < oct->num_oqs; q++) { struct octep_oq *oq = oct->oq[q]; data[i++] = oq->stats.packets; data[i++] = oq->stats.bytes; data[i++] = oq->stats.alloc_failures; } } #define OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(octep_speeds, ksettings, name) \ { \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_T)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseT_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_R)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseR_FEC); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_CR)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseCR_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_KR)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseKR_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_LR)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseLR_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_SR)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseSR_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_CR)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseCR_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_KR)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseKR_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_SR)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseSR_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_CR4)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseCR4_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_KR4)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseKR4_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_LR4)) \ 
ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseLR4_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_SR4)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseSR4_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR2)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR2_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR2)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR2_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR2)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR2_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_LR)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseLR_ER_FR_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_CR4)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseCR4_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_KR4)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseKR4_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_LR4)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseLR4_ER4_Full); \ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_SR4)) \ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseSR4_Full); \ } static int octep_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct octep_device *oct = netdev_priv(netdev); struct octep_iface_link_info *link_info; u32 advertised_modes, supported_modes; ethtool_link_ksettings_zero_link_mode(cmd, supported); ethtool_link_ksettings_zero_link_mode(cmd, advertising); link_info = &oct->link_info; octep_ctrl_net_get_link_info(oct, OCTEP_CTRL_NET_INVALID_VFID, link_info); advertised_modes = oct->link_info.advertised_modes; supported_modes = oct->link_info.supported_modes; OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(supported_modes, cmd, supported); OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(advertised_modes, cmd, advertising); if (link_info->autoneg) { if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED) ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_ADVERTISED) { ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); cmd->base.autoneg = AUTONEG_ENABLE; } else { cmd->base.autoneg = AUTONEG_DISABLE; } } else { cmd->base.autoneg = AUTONEG_DISABLE; } if (link_info->pause) { if (link_info->pause & OCTEP_LINK_MODE_PAUSE_SUPPORTED) ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); if (link_info->pause & OCTEP_LINK_MODE_PAUSE_ADVERTISED) ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); } cmd->base.port = PORT_FIBRE; ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); if (netif_carrier_ok(netdev)) { cmd->base.speed = link_info->speed; cmd->base.duplex = DUPLEX_FULL; } else { cmd->base.speed = SPEED_UNKNOWN; cmd->base.duplex = DUPLEX_UNKNOWN; } return 0; } static int octep_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { struct octep_device *oct = netdev_priv(netdev); struct 
octep_iface_link_info link_info_new; struct octep_iface_link_info *link_info; u64 advertised = 0; u8 autoneg = 0; int err; link_info = &oct->link_info; memcpy(&link_info_new, link_info, sizeof(struct octep_iface_link_info)); /* Only Full duplex is supported; * Assume full duplex when duplex is unknown. */ if (cmd->base.duplex != DUPLEX_FULL && cmd->base.duplex != DUPLEX_UNKNOWN) return -EOPNOTSUPP; if (cmd->base.autoneg == AUTONEG_ENABLE) { if (!(link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED)) return -EOPNOTSUPP; autoneg = 1; } if (!bitmap_subset(cmd->link_modes.advertising, cmd->link_modes.supported, __ETHTOOL_LINK_MODE_MASK_NBITS)) return -EINVAL; if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseT_Full)) advertised |= BIT(OCTEP_LINK_MODE_10GBASE_T); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseR_FEC)) advertised |= BIT(OCTEP_LINK_MODE_10GBASE_R); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseCR_Full)) advertised |= BIT(OCTEP_LINK_MODE_10GBASE_CR); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseKR_Full)) advertised |= BIT(OCTEP_LINK_MODE_10GBASE_KR); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseLR_Full)) advertised |= BIT(OCTEP_LINK_MODE_10GBASE_LR); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 10000baseSR_Full)) advertised |= BIT(OCTEP_LINK_MODE_10GBASE_SR); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 25000baseCR_Full)) advertised |= BIT(OCTEP_LINK_MODE_25GBASE_CR); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 25000baseKR_Full)) advertised |= BIT(OCTEP_LINK_MODE_25GBASE_KR); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 25000baseSR_Full)) advertised |= BIT(OCTEP_LINK_MODE_25GBASE_SR); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 40000baseCR4_Full)) advertised |= BIT(OCTEP_LINK_MODE_40GBASE_CR4); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 40000baseKR4_Full)) advertised |= BIT(OCTEP_LINK_MODE_40GBASE_KR4); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 40000baseLR4_Full)) advertised |= BIT(OCTEP_LINK_MODE_40GBASE_LR4); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 40000baseSR4_Full)) advertised |= BIT(OCTEP_LINK_MODE_40GBASE_SR4); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 50000baseCR2_Full)) advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR2); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 50000baseKR2_Full)) advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR2); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 50000baseSR2_Full)) advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR2); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 50000baseCR_Full)) advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 50000baseKR_Full)) advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 50000baseLR_ER_FR_Full)) advertised |= BIT(OCTEP_LINK_MODE_50GBASE_LR); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 50000baseSR_Full)) advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 100000baseCR4_Full)) advertised |= BIT(OCTEP_LINK_MODE_100GBASE_CR4); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 100000baseKR4_Full)) advertised |= BIT(OCTEP_LINK_MODE_100GBASE_KR4); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 100000baseLR4_ER4_Full)) 
advertised |= BIT(OCTEP_LINK_MODE_100GBASE_LR4); if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 100000baseSR4_Full)) advertised |= BIT(OCTEP_LINK_MODE_100GBASE_SR4); if (advertised == link_info->advertised_modes && cmd->base.speed == link_info->speed && cmd->base.autoneg == link_info->autoneg) return 0; link_info_new.advertised_modes = advertised; link_info_new.speed = cmd->base.speed; link_info_new.autoneg = autoneg; err = octep_ctrl_net_set_link_info(oct, OCTEP_CTRL_NET_INVALID_VFID, &link_info_new, true); if (err) return err; memcpy(link_info, &link_info_new, sizeof(struct octep_iface_link_info)); return 0; } static const struct ethtool_ops octep_ethtool_ops = { .get_drvinfo = octep_get_drvinfo, .get_link = ethtool_op_get_link, .get_strings = octep_get_strings, .get_sset_count = octep_get_sset_count, .get_ethtool_stats = octep_get_ethtool_stats, .get_link_ksettings = octep_get_link_ksettings, .set_link_ksettings = octep_set_link_ksettings, }; void octep_set_ethtool_ops(struct net_device *netdev) { netdev->ethtool_ops = &octep_ethtool_ops; }
linux-master
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
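octep_get_strings above builds the ETH_SS_STATS name table by expanding per-queue format templates such as "tx_packets_posted[Q-%u]" once for each active ring, advancing the output pointer one fixed-width slot at a time. The fragment below shows the same snprintf expansion outside the kernel; the two-queue count is illustrative, while the template strings and the 32-byte ETH_GSTRING_LEN slots follow the driver.

#include <stdio.h>

#define ETH_GSTRING_LEN	32	/* fixed slot width used by ethtool strings */
#define NUM_QUEUES	2	/* illustrative; driver reads this from config */

static const char tx_q_templates[][ETH_GSTRING_LEN] = {
	"tx_packets_posted[Q-%u]",
	"tx_packets_completed[Q-%u]",
	"tx_bytes[Q-%u]",
	"tx_busy[Q-%u]",
};

int main(void)
{
	char table[NUM_QUEUES * 4][ETH_GSTRING_LEN];
	char *p = &table[0][0];

	/* One block of four Tx-queue stat names per queue. */
	for (unsigned int q = 0; q < NUM_QUEUES; q++) {
		for (unsigned int t = 0; t < 4; t++) {
			snprintf(p, ETH_GSTRING_LEN, tx_q_templates[t], q);
			p += ETH_GSTRING_LEN;
		}
	}

	for (unsigned int i = 0; i < NUM_QUEUES * 4; i++)
		printf("%s\n", table[i]);
	return 0;
}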
// SPDX-License-Identifier: GPL-2.0 /* Marvell Octeon EP (EndPoint) Ethernet Driver * * Copyright (C) 2020 Marvell. * */ #include <linux/types.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> #include <linux/vmalloc.h> #include "octep_config.h" #include "octep_main.h" #include "octep_ctrl_net.h" #define OCTEP_INTR_POLL_TIME_MSECS 100 struct workqueue_struct *octep_wq; /* Supported Devices */ static const struct pci_device_id octep_pci_id_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_PF)}, {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF95N_PF)}, {0, }, }; MODULE_DEVICE_TABLE(pci, octep_pci_id_tbl); MODULE_AUTHOR("Veerasenareddy Burru <[email protected]>"); MODULE_DESCRIPTION(OCTEP_DRV_STRING); MODULE_LICENSE("GPL"); /** * octep_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info. * * @oct: Octeon device private data structure. * * Allocate resources to hold per Tx/Rx queue interrupt info. * This is the information passed to interrupt handler, from which napi poll * is scheduled and includes quick access to private data of Tx/Rx queue * corresponding to the interrupt being handled. * * Return: 0, on successful allocation of resources for all queue interrupts. * -1, if failed to allocate any resource. */ static int octep_alloc_ioq_vectors(struct octep_device *oct) { int i; struct octep_ioq_vector *ioq_vector; for (i = 0; i < oct->num_oqs; i++) { oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i])); if (!oct->ioq_vector[i]) goto free_ioq_vector; ioq_vector = oct->ioq_vector[i]; ioq_vector->iq = oct->iq[i]; ioq_vector->oq = oct->oq[i]; ioq_vector->octep_dev = oct; } dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs); return 0; free_ioq_vector: while (i) { i--; vfree(oct->ioq_vector[i]); oct->ioq_vector[i] = NULL; } return -1; } /** * octep_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info. * * @oct: Octeon device private data structure. */ static void octep_free_ioq_vectors(struct octep_device *oct) { int i; for (i = 0; i < oct->num_oqs; i++) { if (oct->ioq_vector[i]) { vfree(oct->ioq_vector[i]); oct->ioq_vector[i] = NULL; } } netdev_info(oct->netdev, "Freed IOQ Vectors\n"); } /** * octep_enable_msix_range() - enable MSI-x interrupts. * * @oct: Octeon device private data structure. * * Allocate and enable all MSI-x interrupts (queue and non-queue interrupts) * for the Octeon device. * * Return: 0, on successfully enabling all MSI-x interrupts. * -1, if failed to enable any MSI-x interrupt. */ static int octep_enable_msix_range(struct octep_device *oct) { int num_msix, msix_allocated; int i; /* Generic interrupts apart from input/output queues */ num_msix = oct->num_oqs + CFG_GET_NON_IOQ_MSIX(oct->conf); oct->msix_entries = kcalloc(num_msix, sizeof(struct msix_entry), GFP_KERNEL); if (!oct->msix_entries) goto msix_alloc_err; for (i = 0; i < num_msix; i++) oct->msix_entries[i].entry = i; msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries, num_msix, num_msix); if (msix_allocated != num_msix) { dev_err(&oct->pdev->dev, "Failed to enable %d msix irqs; got only %d\n", num_msix, msix_allocated); goto enable_msix_err; } oct->num_irqs = msix_allocated; dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n"); return 0; enable_msix_err: if (msix_allocated > 0) pci_disable_msix(oct->pdev); kfree(oct->msix_entries); oct->msix_entries = NULL; msix_alloc_err: return -1; } /** * octep_disable_msix() - disable MSI-x interrupts. 
* * @oct: Octeon device private data structure. * * Disable MSI-x on the Octeon device. */ static void octep_disable_msix(struct octep_device *oct) { pci_disable_msix(oct->pdev); kfree(oct->msix_entries); oct->msix_entries = NULL; dev_info(&oct->pdev->dev, "Disabled MSI-X\n"); } /** * octep_non_ioq_intr_handler() - common handler for all generic interrupts. * * @irq: Interrupt number. * @data: interrupt data. * * this is common handler for all non-queue (generic) interrupts. */ static irqreturn_t octep_non_ioq_intr_handler(int irq, void *data) { struct octep_device *oct = data; return oct->hw_ops.non_ioq_intr_handler(oct); } /** * octep_ioq_intr_handler() - handler for all Tx/Rx queue interrupts. * * @irq: Interrupt number. * @data: interrupt data contains pointers to Tx/Rx queue private data * and correspong NAPI context. * * this is common handler for all non-queue (generic) interrupts. */ static irqreturn_t octep_ioq_intr_handler(int irq, void *data) { struct octep_ioq_vector *ioq_vector = data; struct octep_device *oct = ioq_vector->octep_dev; return oct->hw_ops.ioq_intr_handler(ioq_vector); } /** * octep_request_irqs() - Register interrupt handlers. * * @oct: Octeon device private data structure. * * Register handlers for all queue and non-queue interrupts. * * Return: 0, on successful registration of all interrupt handlers. * -1, on any error. */ static int octep_request_irqs(struct octep_device *oct) { struct net_device *netdev = oct->netdev; struct octep_ioq_vector *ioq_vector; struct msix_entry *msix_entry; char **non_ioq_msix_names; int num_non_ioq_msix; int ret, i, j; num_non_ioq_msix = CFG_GET_NON_IOQ_MSIX(oct->conf); non_ioq_msix_names = CFG_GET_NON_IOQ_MSIX_NAMES(oct->conf); oct->non_ioq_irq_names = kcalloc(num_non_ioq_msix, OCTEP_MSIX_NAME_SIZE, GFP_KERNEL); if (!oct->non_ioq_irq_names) goto alloc_err; /* First few MSI-X interrupts are non-queue interrupts */ for (i = 0; i < num_non_ioq_msix; i++) { char *irq_name; irq_name = &oct->non_ioq_irq_names[i * OCTEP_MSIX_NAME_SIZE]; msix_entry = &oct->msix_entries[i]; snprintf(irq_name, OCTEP_MSIX_NAME_SIZE, "%s-%s", netdev->name, non_ioq_msix_names[i]); ret = request_irq(msix_entry->vector, octep_non_ioq_intr_handler, 0, irq_name, oct); if (ret) { netdev_err(netdev, "request_irq failed for %s; err=%d", irq_name, ret); goto non_ioq_irq_err; } } /* Request IRQs for Tx/Rx queues */ for (j = 0; j < oct->num_oqs; j++) { ioq_vector = oct->ioq_vector[j]; msix_entry = &oct->msix_entries[j + num_non_ioq_msix]; snprintf(ioq_vector->name, sizeof(ioq_vector->name), "%s-q%d", netdev->name, j); ret = request_irq(msix_entry->vector, octep_ioq_intr_handler, 0, ioq_vector->name, ioq_vector); if (ret) { netdev_err(netdev, "request_irq failed for Q-%d; err=%d", j, ret); goto ioq_irq_err; } cpumask_set_cpu(j % num_online_cpus(), &ioq_vector->affinity_mask); irq_set_affinity_hint(msix_entry->vector, &ioq_vector->affinity_mask); } return 0; ioq_irq_err: while (j) { --j; ioq_vector = oct->ioq_vector[j]; msix_entry = &oct->msix_entries[j + num_non_ioq_msix]; irq_set_affinity_hint(msix_entry->vector, NULL); free_irq(msix_entry->vector, ioq_vector); } non_ioq_irq_err: while (i) { --i; free_irq(oct->msix_entries[i].vector, oct); } kfree(oct->non_ioq_irq_names); oct->non_ioq_irq_names = NULL; alloc_err: return -1; } /** * octep_free_irqs() - free all registered interrupts. * * @oct: Octeon device private data structure. * * Free all queue and non-queue interrupts of the Octeon device. 
*/ static void octep_free_irqs(struct octep_device *oct) { int i; /* First few MSI-X interrupts are non queue interrupts; free them */ for (i = 0; i < CFG_GET_NON_IOQ_MSIX(oct->conf); i++) free_irq(oct->msix_entries[i].vector, oct); kfree(oct->non_ioq_irq_names); /* Free IRQs for Input/Output (Tx/Rx) queues */ for (i = CFG_GET_NON_IOQ_MSIX(oct->conf); i < oct->num_irqs; i++) { irq_set_affinity_hint(oct->msix_entries[i].vector, NULL); free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i - CFG_GET_NON_IOQ_MSIX(oct->conf)]); } netdev_info(oct->netdev, "IRQs freed\n"); } /** * octep_setup_irqs() - setup interrupts for the Octeon device. * * @oct: Octeon device private data structure. * * Allocate data structures to hold per interrupt information, allocate/enable * MSI-x interrupt and register interrupt handlers. * * Return: 0, on successful allocation and registration of all interrupts. * -1, on any error. */ static int octep_setup_irqs(struct octep_device *oct) { if (octep_alloc_ioq_vectors(oct)) goto ioq_vector_err; if (octep_enable_msix_range(oct)) goto enable_msix_err; if (octep_request_irqs(oct)) goto request_irq_err; return 0; request_irq_err: octep_disable_msix(oct); enable_msix_err: octep_free_ioq_vectors(oct); ioq_vector_err: return -1; } /** * octep_clean_irqs() - free all interrupts and its resources. * * @oct: Octeon device private data structure. */ static void octep_clean_irqs(struct octep_device *oct) { octep_free_irqs(oct); octep_disable_msix(oct); octep_free_ioq_vectors(oct); } /** * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue. * * @iq: Octeon Tx queue data structure. * @oq: Octeon Rx queue data structure. */ static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq) { u32 pkts_pend = oq->pkts_pending; netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no); if (iq->pkts_processed) { writel(iq->pkts_processed, iq->inst_cnt_reg); iq->pkt_in_done -= iq->pkts_processed; iq->pkts_processed = 0; } if (oq->last_pkt_count - pkts_pend) { writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg); oq->last_pkt_count = pkts_pend; } /* Flush the previous wrties before writing to RESEND bit */ wmb(); writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg); writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg); } /** * octep_napi_poll() - NAPI poll function for Tx/Rx. * * @napi: pointer to napi context. * @budget: max number of packets to be processed in single invocation. */ static int octep_napi_poll(struct napi_struct *napi, int budget) { struct octep_ioq_vector *ioq_vector = container_of(napi, struct octep_ioq_vector, napi); u32 tx_pending, rx_done; tx_pending = octep_iq_process_completions(ioq_vector->iq, budget); rx_done = octep_oq_process_rx(ioq_vector->oq, budget); /* need more polling if tx completion processing is still pending or * processed at least 'budget' number of rx packets. */ if (tx_pending || rx_done >= budget) return budget; napi_complete(napi); octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq); return rx_done; } /** * octep_napi_add() - Add NAPI poll for all Tx/Rx queues. * * @oct: Octeon device private data structure. */ static void octep_napi_add(struct octep_device *oct) { int i; for (i = 0; i < oct->num_oqs; i++) { netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i); netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi, octep_napi_poll); oct->oq[i]->napi = &oct->ioq_vector[i]->napi; } } /** * octep_napi_delete() - delete NAPI poll callback for all Tx/Rx queues. * * @oct: Octeon device private data structure. 
*/ static void octep_napi_delete(struct octep_device *oct) { int i; for (i = 0; i < oct->num_oqs; i++) { netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i); netif_napi_del(&oct->ioq_vector[i]->napi); oct->oq[i]->napi = NULL; } } /** * octep_napi_enable() - enable NAPI for all Tx/Rx queues. * * @oct: Octeon device private data structure. */ static void octep_napi_enable(struct octep_device *oct) { int i; for (i = 0; i < oct->num_oqs; i++) { netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i); napi_enable(&oct->ioq_vector[i]->napi); } } /** * octep_napi_disable() - disable NAPI for all Tx/Rx queues. * * @oct: Octeon device private data structure. */ static void octep_napi_disable(struct octep_device *oct) { int i; for (i = 0; i < oct->num_oqs; i++) { netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i); napi_disable(&oct->ioq_vector[i]->napi); } } static void octep_link_up(struct net_device *netdev) { netif_carrier_on(netdev); netif_tx_start_all_queues(netdev); } /** * octep_open() - start the octeon network device. * * @netdev: pointer to kernel network device. * * setup Tx/Rx queues, interrupts and enable hardware operation of Tx/Rx queues * and interrupts.. * * Return: 0, on successfully setting up device and bring it up. * -1, on any error. */ static int octep_open(struct net_device *netdev) { struct octep_device *oct = netdev_priv(netdev); int err, ret; netdev_info(netdev, "Starting netdev ...\n"); netif_carrier_off(netdev); oct->hw_ops.reset_io_queues(oct); if (octep_setup_iqs(oct)) goto setup_iq_err; if (octep_setup_oqs(oct)) goto setup_oq_err; if (octep_setup_irqs(oct)) goto setup_irq_err; err = netif_set_real_num_tx_queues(netdev, oct->num_oqs); if (err) goto set_queues_err; err = netif_set_real_num_rx_queues(netdev, oct->num_iqs); if (err) goto set_queues_err; octep_napi_add(oct); octep_napi_enable(oct); oct->link_info.admin_up = 1; octep_ctrl_net_set_rx_state(oct, OCTEP_CTRL_NET_INVALID_VFID, true, false); octep_ctrl_net_set_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID, true, false); oct->poll_non_ioq_intr = false; /* Enable the input and output queues for this Octeon device */ oct->hw_ops.enable_io_queues(oct); /* Enable Octeon device interrupts */ oct->hw_ops.enable_interrupts(oct); octep_oq_dbell_init(oct); ret = octep_ctrl_net_get_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID); if (ret > 0) octep_link_up(netdev); return 0; set_queues_err: octep_clean_irqs(oct); setup_irq_err: octep_free_oqs(oct); setup_oq_err: octep_free_iqs(oct); setup_iq_err: return -1; } /** * octep_stop() - stop the octeon network device. * * @netdev: pointer to kernel network device. * * stop the device Tx/Rx operations, bring down the link and * free up all resources allocated for Tx/Rx queues and interrupts. 
*/ static int octep_stop(struct net_device *netdev) { struct octep_device *oct = netdev_priv(netdev); netdev_info(netdev, "Stopping the device ...\n"); octep_ctrl_net_set_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID, false, false); octep_ctrl_net_set_rx_state(oct, OCTEP_CTRL_NET_INVALID_VFID, false, false); /* Stop Tx from stack */ netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); netif_tx_disable(netdev); oct->link_info.admin_up = 0; oct->link_info.oper_up = 0; oct->hw_ops.disable_interrupts(oct); octep_napi_disable(oct); octep_napi_delete(oct); octep_clean_irqs(oct); octep_clean_iqs(oct); oct->hw_ops.disable_io_queues(oct); oct->hw_ops.reset_io_queues(oct); octep_free_oqs(oct); octep_free_iqs(oct); oct->poll_non_ioq_intr = true; queue_delayed_work(octep_wq, &oct->intr_poll_task, msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS)); netdev_info(netdev, "Device stopped !!\n"); return 0; } /** * octep_iq_full_check() - check if a Tx queue is full. * * @iq: Octeon Tx queue data structure. * * Return: 0, if the Tx queue is not full. * 1, if the Tx queue is full. */ static inline int octep_iq_full_check(struct octep_iq *iq) { if (likely((iq->max_count - atomic_read(&iq->instr_pending)) >= OCTEP_WAKE_QUEUE_THRESHOLD)) return 0; /* Stop the queue if unable to send */ netif_stop_subqueue(iq->netdev, iq->q_no); /* check again and restart the queue, in case NAPI has just freed * enough Tx ring entries. */ if (unlikely((iq->max_count - atomic_read(&iq->instr_pending)) >= OCTEP_WAKE_QUEUE_THRESHOLD)) { netif_start_subqueue(iq->netdev, iq->q_no); iq->stats.restart_cnt++; return 0; } return 1; } /** * octep_start_xmit() - Enqueue packet to Octoen hardware Tx Queue. * * @skb: packet skbuff pointer. * @netdev: kernel network device. * * Return: NETDEV_TX_BUSY, if Tx Queue is full. * NETDEV_TX_OK, if successfully enqueued to hardware Tx queue. 
*/ static netdev_tx_t octep_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct octep_device *oct = netdev_priv(netdev); struct octep_tx_sglist_desc *sglist; struct octep_tx_buffer *tx_buffer; struct octep_tx_desc_hw *hw_desc; struct skb_shared_info *shinfo; struct octep_instr_hdr *ih; struct octep_iq *iq; skb_frag_t *frag; u16 nr_frags, si; u16 q_no, wi; q_no = skb_get_queue_mapping(skb); if (q_no >= oct->num_iqs) { netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no); q_no = q_no % oct->num_iqs; } iq = oct->iq[q_no]; if (octep_iq_full_check(iq)) { iq->stats.tx_busy++; return NETDEV_TX_BUSY; } shinfo = skb_shinfo(skb); nr_frags = shinfo->nr_frags; wi = iq->host_write_index; hw_desc = &iq->desc_ring[wi]; hw_desc->ih64 = 0; tx_buffer = iq->buff_info + wi; tx_buffer->skb = skb; ih = &hw_desc->ih; ih->tlen = skb->len; ih->pkind = oct->pkind; if (!nr_frags) { tx_buffer->gather = 0; tx_buffer->dma = dma_map_single(iq->dev, skb->data, skb->len, DMA_TO_DEVICE); if (dma_mapping_error(iq->dev, tx_buffer->dma)) goto dma_map_err; hw_desc->dptr = tx_buffer->dma; } else { /* Scatter/Gather */ dma_addr_t dma; u16 len; sglist = tx_buffer->sglist; ih->gsz = nr_frags + 1; ih->gather = 1; tx_buffer->gather = 1; len = skb_headlen(skb); dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(iq->dev, dma)) goto dma_map_err; dma_sync_single_for_cpu(iq->dev, tx_buffer->sglist_dma, OCTEP_SGLIST_SIZE_PER_PKT, DMA_TO_DEVICE); memset(sglist, 0, OCTEP_SGLIST_SIZE_PER_PKT); sglist[0].len[3] = len; sglist[0].dma_ptr[0] = dma; si = 1; /* entry 0 is main skb, mapped above */ frag = &shinfo->frags[0]; while (nr_frags--) { len = skb_frag_size(frag); dma = skb_frag_dma_map(iq->dev, frag, 0, len, DMA_TO_DEVICE); if (dma_mapping_error(iq->dev, dma)) goto dma_map_sg_err; sglist[si >> 2].len[3 - (si & 3)] = len; sglist[si >> 2].dma_ptr[si & 3] = dma; frag++; si++; } dma_sync_single_for_device(iq->dev, tx_buffer->sglist_dma, OCTEP_SGLIST_SIZE_PER_PKT, DMA_TO_DEVICE); hw_desc->dptr = tx_buffer->sglist_dma; } /* Flush the hw descriptor before writing to doorbell */ wmb(); /* Ring Doorbell to notify the NIC there is a new packet */ writel(1, iq->doorbell_reg); atomic_inc(&iq->instr_pending); wi++; if (wi == iq->max_count) wi = 0; iq->host_write_index = wi; netdev_tx_sent_queue(iq->netdev_q, skb->len); iq->stats.instr_posted++; skb_tx_timestamp(skb); return NETDEV_TX_OK; dma_map_sg_err: if (si > 0) { dma_unmap_single(iq->dev, sglist[0].dma_ptr[0], sglist[0].len[3], DMA_TO_DEVICE); sglist[0].len[3] = 0; } while (si > 1) { dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3], sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE); sglist[si >> 2].len[3 - (si & 3)] = 0; si--; } tx_buffer->gather = 0; dma_map_err: dev_kfree_skb_any(skb); return NETDEV_TX_OK; } /** * octep_get_stats64() - Get Octeon network device statistics. * * @netdev: kernel network device. * @stats: pointer to stats structure to be filled in. 
*/ static void octep_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { u64 tx_packets, tx_bytes, rx_packets, rx_bytes; struct octep_device *oct = netdev_priv(netdev); int q; if (netif_running(netdev)) octep_ctrl_net_get_if_stats(oct, OCTEP_CTRL_NET_INVALID_VFID, &oct->iface_rx_stats, &oct->iface_tx_stats); tx_packets = 0; tx_bytes = 0; rx_packets = 0; rx_bytes = 0; for (q = 0; q < oct->num_oqs; q++) { struct octep_iq *iq = oct->iq[q]; struct octep_oq *oq = oct->oq[q]; tx_packets += iq->stats.instr_completed; tx_bytes += iq->stats.bytes_sent; rx_packets += oq->stats.packets; rx_bytes += oq->stats.bytes; } stats->tx_packets = tx_packets; stats->tx_bytes = tx_bytes; stats->rx_packets = rx_packets; stats->rx_bytes = rx_bytes; stats->multicast = oct->iface_rx_stats.mcast_pkts; stats->rx_errors = oct->iface_rx_stats.err_pkts; stats->collisions = oct->iface_tx_stats.xscol; stats->tx_fifo_errors = oct->iface_tx_stats.undflw; } /** * octep_tx_timeout_task - work queue task to Handle Tx queue timeout. * * @work: pointer to Tx queue timeout work_struct * * Stop and start the device so that it frees up all queue resources * and restarts the queues, that potentially clears a Tx queue timeout * condition. **/ static void octep_tx_timeout_task(struct work_struct *work) { struct octep_device *oct = container_of(work, struct octep_device, tx_timeout_task); struct net_device *netdev = oct->netdev; rtnl_lock(); if (netif_running(netdev)) { octep_stop(netdev); octep_open(netdev); } rtnl_unlock(); } /** * octep_tx_timeout() - Handle Tx Queue timeout. * * @netdev: pointer to kernel network device. * @txqueue: Timed out Tx queue number. * * Schedule a work to handle Tx queue timeout. */ static void octep_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct octep_device *oct = netdev_priv(netdev); queue_work(octep_wq, &oct->tx_timeout_task); } static int octep_set_mac(struct net_device *netdev, void *p) { struct octep_device *oct = netdev_priv(netdev); struct sockaddr *addr = (struct sockaddr *)p; int err; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; err = octep_ctrl_net_set_mac_addr(oct, OCTEP_CTRL_NET_INVALID_VFID, addr->sa_data, true); if (err) return err; memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN); eth_hw_addr_set(netdev, addr->sa_data); return 0; } static int octep_change_mtu(struct net_device *netdev, int new_mtu) { struct octep_device *oct = netdev_priv(netdev); struct octep_iface_link_info *link_info; int err = 0; link_info = &oct->link_info; if (link_info->mtu == new_mtu) return 0; err = octep_ctrl_net_set_mtu(oct, OCTEP_CTRL_NET_INVALID_VFID, new_mtu, true); if (!err) { oct->link_info.mtu = new_mtu; netdev->mtu = new_mtu; } return err; } static const struct net_device_ops octep_netdev_ops = { .ndo_open = octep_open, .ndo_stop = octep_stop, .ndo_start_xmit = octep_start_xmit, .ndo_get_stats64 = octep_get_stats64, .ndo_tx_timeout = octep_tx_timeout, .ndo_set_mac_address = octep_set_mac, .ndo_change_mtu = octep_change_mtu, }; /** * octep_intr_poll_task - work queue task to process non-ioq interrupts. * * @work: pointer to mbox work_struct * * Process non-ioq interrupts to handle control mailbox, pfvf mailbox. 
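 * The task re-arms itself every OCTEP_INTR_POLL_TIME_MSECS until oct->poll_non_ioq_intr is cleared.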
**/ static void octep_intr_poll_task(struct work_struct *work) { struct octep_device *oct = container_of(work, struct octep_device, intr_poll_task.work); if (!oct->poll_non_ioq_intr) { dev_info(&oct->pdev->dev, "Interrupt poll task stopped.\n"); return; } oct->hw_ops.poll_non_ioq_interrupts(oct); queue_delayed_work(octep_wq, &oct->intr_poll_task, msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS)); } /** * octep_hb_timeout_task - work queue task to check firmware heartbeat. * * @work: pointer to hb work_struct * * Check for heartbeat miss count. Uninitialize oct device if miss count * exceeds configured max heartbeat miss count. * **/ static void octep_hb_timeout_task(struct work_struct *work) { struct octep_device *oct = container_of(work, struct octep_device, hb_task.work); int miss_cnt; miss_cnt = atomic_inc_return(&oct->hb_miss_cnt); if (miss_cnt < oct->conf->max_hb_miss_cnt) { queue_delayed_work(octep_wq, &oct->hb_task, msecs_to_jiffies(oct->conf->hb_interval * 1000)); return; } dev_err(&oct->pdev->dev, "Missed %u heartbeats. Uninitializing\n", miss_cnt); rtnl_lock(); if (netif_running(oct->netdev)) octep_stop(oct->netdev); rtnl_unlock(); } /** * octep_ctrl_mbox_task - work queue task to handle ctrl mbox messages. * * @work: pointer to ctrl mbox work_struct * * Poll ctrl mbox message queue and handle control messages from firmware. **/ static void octep_ctrl_mbox_task(struct work_struct *work) { struct octep_device *oct = container_of(work, struct octep_device, ctrl_mbox_task); octep_ctrl_net_recv_fw_messages(oct); } static const char *octep_devid_to_str(struct octep_device *oct) { switch (oct->chip_id) { case OCTEP_PCI_DEVICE_ID_CN93_PF: return "CN93XX"; case OCTEP_PCI_DEVICE_ID_CNF95N_PF: return "CNF95N"; default: return "Unsupported"; } } /** * octep_device_setup() - Setup Octeon Device. * * @oct: Octeon device private data structure. * * Setup Octeon device hardware operations, configuration, etc ... */ int octep_device_setup(struct octep_device *oct) { struct pci_dev *pdev = oct->pdev; int i, ret; /* allocate memory for oct->conf */ oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL); if (!oct->conf) return -ENOMEM; /* Map BAR regions */ for (i = 0; i < OCTEP_MMIO_REGIONS; i++) { oct->mmio[i].hw_addr = ioremap(pci_resource_start(oct->pdev, i * 2), pci_resource_len(oct->pdev, i * 2)); if (!oct->mmio[i].hw_addr) goto unmap_prev; oct->mmio[i].mapped = 1; } oct->chip_id = pdev->device; oct->rev_id = pdev->revision; dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device); switch (oct->chip_id) { case OCTEP_PCI_DEVICE_ID_CN93_PF: case OCTEP_PCI_DEVICE_ID_CNF95N_PF: dev_info(&pdev->dev, "Setting up OCTEON %s PF PASS%d.%d\n", octep_devid_to_str(oct), OCTEP_MAJOR_REV(oct), OCTEP_MINOR_REV(oct)); octep_device_setup_cn93_pf(oct); break; default: dev_err(&pdev->dev, "%s: unsupported device\n", __func__); goto unsupported_dev; } oct->pkind = CFG_GET_IQ_PKIND(oct->conf); ret = octep_ctrl_net_init(oct); if (ret) return ret; atomic_set(&oct->hb_miss_cnt, 0); INIT_DELAYED_WORK(&oct->hb_task, octep_hb_timeout_task); queue_delayed_work(octep_wq, &oct->hb_task, msecs_to_jiffies(oct->conf->hb_interval * 1000)); return 0; unsupported_dev: i = OCTEP_MMIO_REGIONS; unmap_prev: while (i--) iounmap(oct->mmio[i].hw_addr); kfree(oct->conf); return -1; } /** * octep_device_cleanup() - Cleanup Octeon Device. * * @oct: Octeon device private data structure. * * Cleanup Octeon device allocated resources. 
*/ static void octep_device_cleanup(struct octep_device *oct) { int i; oct->poll_non_ioq_intr = false; cancel_delayed_work_sync(&oct->intr_poll_task); cancel_work_sync(&oct->ctrl_mbox_task); dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n"); for (i = 0; i < OCTEP_MAX_VF; i++) { vfree(oct->mbox[i]); oct->mbox[i] = NULL; } octep_ctrl_net_uninit(oct); cancel_delayed_work_sync(&oct->hb_task); oct->hw_ops.soft_reset(oct); for (i = 0; i < OCTEP_MMIO_REGIONS; i++) { if (oct->mmio[i].mapped) iounmap(oct->mmio[i].hw_addr); } kfree(oct->conf); oct->conf = NULL; } static bool get_fw_ready_status(struct pci_dev *pdev) { u32 pos = 0; u16 vsec_id; u8 status; while ((pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_VNDR))) { pci_read_config_word(pdev, pos + 4, &vsec_id); #define FW_STATUS_VSEC_ID 0xA3 if (vsec_id != FW_STATUS_VSEC_ID) continue; pci_read_config_byte(pdev, (pos + 8), &status); dev_info(&pdev->dev, "Firmware ready status = %u\n", status); return status; } return false; } /** * octep_probe() - Octeon PCI device probe handler. * * @pdev: PCI device structure. * @ent: entry in Octeon PCI device ID table. * * Initializes and enables the Octeon PCI device for network operations. * Initializes Octeon private data structure and registers a network device. */ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct octep_device *octep_dev = NULL; struct net_device *netdev; int err; err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Failed to enable PCI device\n"); return err; } err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { dev_err(&pdev->dev, "Failed to set DMA mask !!\n"); goto err_dma_mask; } err = pci_request_mem_regions(pdev, OCTEP_DRV_NAME); if (err) { dev_err(&pdev->dev, "Failed to map PCI memory regions\n"); goto err_pci_regions; } pci_set_master(pdev); if (!get_fw_ready_status(pdev)) { dev_notice(&pdev->dev, "Firmware not ready; defer probe.\n"); err = -EPROBE_DEFER; goto err_alloc_netdev; } netdev = alloc_etherdev_mq(sizeof(struct octep_device), OCTEP_MAX_QUEUES); if (!netdev) { dev_err(&pdev->dev, "Failed to allocate netdev\n"); err = -ENOMEM; goto err_alloc_netdev; } SET_NETDEV_DEV(netdev, &pdev->dev); octep_dev = netdev_priv(netdev); octep_dev->netdev = netdev; octep_dev->pdev = pdev; octep_dev->dev = &pdev->dev; pci_set_drvdata(pdev, octep_dev); err = octep_device_setup(octep_dev); if (err) { dev_err(&pdev->dev, "Device setup failed\n"); goto err_octep_config; } INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task); INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task); INIT_DELAYED_WORK(&octep_dev->intr_poll_task, octep_intr_poll_task); octep_dev->poll_non_ioq_intr = true; queue_delayed_work(octep_wq, &octep_dev->intr_poll_task, msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS)); netdev->netdev_ops = &octep_netdev_ops; octep_set_ethtool_ops(netdev); netif_carrier_off(netdev); netdev->hw_features = NETIF_F_SG; netdev->features |= netdev->hw_features; netdev->min_mtu = OCTEP_MIN_MTU; netdev->max_mtu = OCTEP_MAX_MTU; netdev->mtu = OCTEP_DEFAULT_MTU; err = octep_ctrl_net_get_mac_addr(octep_dev, OCTEP_CTRL_NET_INVALID_VFID, octep_dev->mac_addr); if (err) { dev_err(&pdev->dev, "Failed to get mac address\n"); goto register_dev_err; } eth_hw_addr_set(netdev, octep_dev->mac_addr); err = register_netdev(netdev); if (err) { dev_err(&pdev->dev, "Failed to register netdev\n"); goto register_dev_err; } dev_info(&pdev->dev, "Device probe successful\n"); return 0; register_dev_err: 
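	/* Unwind in the reverse order of the setup done above */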
octep_device_cleanup(octep_dev); err_octep_config: free_netdev(netdev); err_alloc_netdev: pci_release_mem_regions(pdev); err_pci_regions: err_dma_mask: pci_disable_device(pdev); return err; } /** * octep_remove() - Remove Octeon PCI device from driver control. * * @pdev: PCI device structure of the Octeon device. * * Clean up all resources allocated for the Octeon device. * Unregister the network device and disable the PCI device. */ static void octep_remove(struct pci_dev *pdev) { struct octep_device *oct = pci_get_drvdata(pdev); struct net_device *netdev; if (!oct) return; netdev = oct->netdev; if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); cancel_work_sync(&oct->tx_timeout_task); octep_device_cleanup(oct); pci_release_mem_regions(pdev); free_netdev(netdev); pci_disable_device(pdev); } static struct pci_driver octep_driver = { .name = OCTEP_DRV_NAME, .id_table = octep_pci_id_tbl, .probe = octep_probe, .remove = octep_remove, }; /** * octep_init_module() - Module initialization. * * Create common resources for the driver and register the PCI driver. */ static int __init octep_init_module(void) { int ret; pr_info("%s: Loading %s ...\n", OCTEP_DRV_NAME, OCTEP_DRV_STRING); /* work queue for all deferred tasks */ octep_wq = create_singlethread_workqueue(OCTEP_DRV_NAME); if (!octep_wq) { pr_err("%s: Failed to create common workqueue\n", OCTEP_DRV_NAME); return -ENOMEM; } ret = pci_register_driver(&octep_driver); if (ret < 0) { pr_err("%s: Failed to register PCI driver; err=%d\n", OCTEP_DRV_NAME, ret); destroy_workqueue(octep_wq); return ret; } pr_info("%s: Loaded successfully !\n", OCTEP_DRV_NAME); return ret; } /** * octep_exit_module() - Module exit routine. * * Unregister the driver from the PCI subsystem and clean up common resources. */ static void __exit octep_exit_module(void) { pr_info("%s: Unloading ...\n", OCTEP_DRV_NAME); pci_unregister_driver(&octep_driver); destroy_workqueue(octep_wq); pr_info("%s: Unloading complete\n", OCTEP_DRV_NAME); } module_init(octep_init_module); module_exit(octep_exit_module);
linux-master
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
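The transmit path in octep_main.c above packs scatter/gather entries four per sglist descriptor: lengths land in reversed slots (len[3 - (si & 3)]) while DMA addresses use natural order (dma_ptr[si & 3]). Below is a minimal standalone C sketch of just that index arithmetic; the demo_* names and field widths are illustrative assumptions, not the driver's real structures.

/* Standalone sketch of the 4-entries-per-descriptor sglist indexing used by
 * octep_start_xmit(). Types and values here are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_sg_desc {          /* models struct octep_tx_sglist_desc */
	uint16_t len[4];       /* lengths, stored in reversed slot order */
	uint64_t dma_ptr[4];   /* DMA addresses, stored in natural order */
};

/* Place entry 'si' (0 = linear skb data, 1..n = page fragments). */
static void demo_fill(struct demo_sg_desc *sg, unsigned int si,
		      uint16_t len, uint64_t dma)
{
	sg[si >> 2].len[3 - (si & 3)] = len;   /* same math as the driver */
	sg[si >> 2].dma_ptr[si & 3] = dma;
}

int main(void)
{
	struct demo_sg_desc sg[4] = { 0 };
	uint16_t lens[] = { 128, 4096, 4096, 2048, 512 }; /* head + 4 frags */
	unsigned int si;

	for (si = 0; si < 5; si++)
		demo_fill(sg, si, lens[si], 0x1000ull * (si + 1)); /* fake DMA */

	for (si = 0; si < 5; si++)
		printf("entry %u -> desc %u, len slot %u, ptr slot %u\n",
		       si, si >> 2, 3 - (si & 3), si & 3);
	return 0;
}

The main() loop only prints which descriptor and slot each entry lands in, which is the part of the layout the error-unwind path (dma_map_sg_err) also relies on.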
// SPDX-License-Identifier: GPL-2.0 /* Marvell Octeon EP (EndPoint) Ethernet Driver * * Copyright (C) 2020 Marvell. * */ #include <linux/pci.h> #include <linux/etherdevice.h> #include <linux/vmalloc.h> #include "octep_config.h" #include "octep_main.h" static void octep_oq_reset_indices(struct octep_oq *oq) { oq->host_read_idx = 0; oq->host_refill_idx = 0; oq->refill_count = 0; oq->last_pkt_count = 0; oq->pkts_pending = 0; } /** * octep_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring. * * @oq: Octeon Rx queue data structure. * * Return: 0, if successfully filled receive buffers for all descriptors. * -1, if failed to allocate a buffer or failed to map for DMA. */ static int octep_oq_fill_ring_buffers(struct octep_oq *oq) { struct octep_oq_desc_hw *desc_ring = oq->desc_ring; struct page *page; u32 i; for (i = 0; i < oq->max_count; i++) { page = dev_alloc_page(); if (unlikely(!page)) { dev_err(oq->dev, "Rx buffer alloc failed\n"); goto rx_buf_alloc_err; } desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) { dev_err(oq->dev, "OQ-%d buffer alloc: DMA mapping error!\n", oq->q_no); put_page(page); goto dma_map_err; } oq->buff_info[i].page = page; } return 0; dma_map_err: rx_buf_alloc_err: while (i) { i--; dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE); put_page(oq->buff_info[i].page); oq->buff_info[i].page = NULL; } return -1; } /** * octep_oq_refill() - refill buffers for used Rx ring descriptors. * * @oct: Octeon device private data structure. * @oq: Octeon Rx queue data structure. * * Return: number of descriptors successfully refilled with receive buffers. */ static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq) { struct octep_oq_desc_hw *desc_ring = oq->desc_ring; struct page *page; u32 refill_idx, i; refill_idx = oq->host_refill_idx; for (i = 0; i < oq->refill_count; i++) { page = dev_alloc_page(); if (unlikely(!page)) { dev_err(oq->dev, "refill: rx buffer alloc failed\n"); oq->stats.alloc_failures++; break; } desc_ring[refill_idx].buffer_ptr = dma_map_page(oq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(oq->dev, desc_ring[refill_idx].buffer_ptr)) { dev_err(oq->dev, "OQ-%d buffer refill: DMA mapping error!\n", oq->q_no); put_page(page); oq->stats.alloc_failures++; break; } oq->buff_info[refill_idx].page = page; refill_idx++; if (refill_idx == oq->max_count) refill_idx = 0; } oq->host_refill_idx = refill_idx; oq->refill_count -= i; return i; } /** * octep_setup_oq() - Setup a Rx queue. * * @oct: Octeon device private data structure. * @q_no: Rx queue number to be setup. * * Allocate resources for a Rx queue. */ static int octep_setup_oq(struct octep_device *oct, int q_no) { struct octep_oq *oq; u32 desc_ring_size; oq = vzalloc(sizeof(*oq)); if (!oq) goto create_oq_fail; oct->oq[q_no] = oq; oq->octep_dev = oct; oq->netdev = oct->netdev; oq->dev = &oct->pdev->dev; oq->q_no = q_no; oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf); oq->ring_size_mask = oq->max_count - 1; oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf); oq->max_single_buffer_size = oq->buffer_size - OCTEP_OQ_RESP_HW_SIZE; /* When the hardware/firmware supports additional capabilities, * additional header is filled-in by Octeon after length field in * Rx packets. this header contains additional packet information. 
*/ if (oct->caps_enabled) oq->max_single_buffer_size -= OCTEP_OQ_RESP_HW_EXT_SIZE; oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf); desc_ring_size = oq->max_count * OCTEP_OQ_DESC_SIZE; oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size, &oq->desc_ring_dma, GFP_KERNEL); if (unlikely(!oq->desc_ring)) { dev_err(oq->dev, "Failed to allocate DMA memory for OQ-%d !!\n", q_no); goto desc_dma_alloc_err; } oq->buff_info = vcalloc(oq->max_count, OCTEP_OQ_RECVBUF_SIZE); if (unlikely(!oq->buff_info)) { dev_err(&oct->pdev->dev, "Failed to allocate buffer info for OQ-%d\n", q_no); goto buf_list_err; } if (octep_oq_fill_ring_buffers(oq)) goto oq_fill_buff_err; octep_oq_reset_indices(oq); oct->hw_ops.setup_oq_regs(oct, q_no); oct->num_oqs++; return 0; oq_fill_buff_err: vfree(oq->buff_info); oq->buff_info = NULL; buf_list_err: dma_free_coherent(oq->dev, desc_ring_size, oq->desc_ring, oq->desc_ring_dma); oq->desc_ring = NULL; desc_dma_alloc_err: vfree(oq); oct->oq[q_no] = NULL; create_oq_fail: return -1; } /** * octep_oq_free_ring_buffers() - Free ring buffers. * * @oq: Octeon Rx queue data structure. * * Free receive buffers in unused Rx queue descriptors. */ static void octep_oq_free_ring_buffers(struct octep_oq *oq) { struct octep_oq_desc_hw *desc_ring = oq->desc_ring; int i; if (!oq->desc_ring || !oq->buff_info) return; for (i = 0; i < oq->max_count; i++) { if (oq->buff_info[i].page) { dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE); put_page(oq->buff_info[i].page); oq->buff_info[i].page = NULL; desc_ring[i].buffer_ptr = 0; } } octep_oq_reset_indices(oq); } /** * octep_free_oq() - Free Rx queue resources. * * @oq: Octeon Rx queue data structure. * * Free all resources of a Rx queue. */ static int octep_free_oq(struct octep_oq *oq) { struct octep_device *oct = oq->octep_dev; int q_no = oq->q_no; octep_oq_free_ring_buffers(oq); vfree(oq->buff_info); if (oq->desc_ring) dma_free_coherent(oq->dev, oq->max_count * OCTEP_OQ_DESC_SIZE, oq->desc_ring, oq->desc_ring_dma); vfree(oq); oct->oq[q_no] = NULL; oct->num_oqs--; return 0; } /** * octep_setup_oqs() - setup resources for all Rx queues. * * @oct: Octeon device private data structure. */ int octep_setup_oqs(struct octep_device *oct) { int i, retval = 0; oct->num_oqs = 0; for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { retval = octep_setup_oq(oct, i); if (retval) { dev_err(&oct->pdev->dev, "Failed to setup OQ(RxQ)-%d.\n", i); goto oq_setup_err; } dev_dbg(&oct->pdev->dev, "Successfully setup OQ(RxQ)-%d.\n", i); } return 0; oq_setup_err: while (i) { i--; octep_free_oq(oct->oq[i]); } return -1; } /** * octep_oq_dbell_init() - Initialize Rx queue doorbell. * * @oct: Octeon device private data structure. * * Write number of descriptors to Rx queue doorbell register. */ void octep_oq_dbell_init(struct octep_device *oct) { int i; for (i = 0; i < oct->num_oqs; i++) writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg); } /** * octep_free_oqs() - Free resources of all Rx queues. * * @oct: Octeon device private data structure. */ void octep_free_oqs(struct octep_device *oct) { int i; for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { if (!oct->oq[i]) continue; octep_free_oq(oct->oq[i]); dev_dbg(&oct->pdev->dev, "Successfully freed OQ(RxQ)-%d.\n", i); } } /** * octep_oq_check_hw_for_pkts() - Check for new Rx packets. * * @oct: Octeon device private data structure. * @oq: Octeon Rx queue data structure. * * Return: packets received after previous check. 
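 * The pkts_sent register is a cumulative count; the driver writes it back to clear the counted packets only when it gets close to wrap-around, saving MMIO writes on the fast path.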
*/ static int octep_oq_check_hw_for_pkts(struct octep_device *oct, struct octep_oq *oq) { u32 pkt_count, new_pkts; pkt_count = readl(oq->pkts_sent_reg); new_pkts = pkt_count - oq->last_pkt_count; /* Clear the hardware packets counter register if the rx queue is * being processed continuously with-in a single interrupt and * reached half its max value. * this counter is not cleared every time read, to save write cycles. */ if (unlikely(pkt_count > 0xF0000000U)) { writel(pkt_count, oq->pkts_sent_reg); pkt_count = readl(oq->pkts_sent_reg); new_pkts += pkt_count; } oq->last_pkt_count = pkt_count; oq->pkts_pending += new_pkts; return new_pkts; } /** * __octep_oq_process_rx() - Process hardware Rx queue and push to stack. * * @oct: Octeon device private data structure. * @oq: Octeon Rx queue data structure. * @pkts_to_process: number of packets to be processed. * * Process the new packets in Rx queue. * Packets larger than single Rx buffer arrive in consecutive descriptors. * But, count returned by the API only accounts full packets, not fragments. * * Return: number of packets processed and pushed to stack. */ static int __octep_oq_process_rx(struct octep_device *oct, struct octep_oq *oq, u16 pkts_to_process) { struct octep_oq_resp_hw_ext *resp_hw_ext = NULL; struct octep_rx_buffer *buff_info; struct octep_oq_resp_hw *resp_hw; u32 pkt, rx_bytes, desc_used; struct sk_buff *skb; u16 data_offset; u32 read_idx; read_idx = oq->host_read_idx; rx_bytes = 0; desc_used = 0; for (pkt = 0; pkt < pkts_to_process; pkt++) { buff_info = (struct octep_rx_buffer *)&oq->buff_info[read_idx]; dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE); resp_hw = page_address(buff_info->page); buff_info->page = NULL; /* Swap the length field that is in Big-Endian to CPU */ buff_info->len = be64_to_cpu(resp_hw->length); if (oct->caps_enabled & OCTEP_CAP_RX_CHECKSUM) { /* Extended response header is immediately after * response header (resp_hw) */ resp_hw_ext = (struct octep_oq_resp_hw_ext *) (resp_hw + 1); buff_info->len -= OCTEP_OQ_RESP_HW_EXT_SIZE; /* Packet Data is immediately after * extended response header. */ data_offset = OCTEP_OQ_RESP_HW_SIZE + OCTEP_OQ_RESP_HW_EXT_SIZE; } else { /* Data is immediately after * Hardware Rx response header. */ data_offset = OCTEP_OQ_RESP_HW_SIZE; } rx_bytes += buff_info->len; if (buff_info->len <= oq->max_single_buffer_size) { skb = build_skb((void *)resp_hw, PAGE_SIZE); skb_reserve(skb, data_offset); skb_put(skb, buff_info->len); read_idx++; desc_used++; if (read_idx == oq->max_count) read_idx = 0; } else { struct skb_shared_info *shinfo; u16 data_len; skb = build_skb((void *)resp_hw, PAGE_SIZE); skb_reserve(skb, data_offset); /* Head fragment includes response header(s); * subsequent fragments contains only data. 
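 * Each additional fragment consumes one more Rx descriptor and carries at most oq->buffer_size bytes.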
*/ skb_put(skb, oq->max_single_buffer_size); read_idx++; desc_used++; if (read_idx == oq->max_count) read_idx = 0; shinfo = skb_shinfo(skb); data_len = buff_info->len - oq->max_single_buffer_size; while (data_len) { dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE); buff_info = (struct octep_rx_buffer *) &oq->buff_info[read_idx]; if (data_len < oq->buffer_size) { buff_info->len = data_len; data_len = 0; } else { buff_info->len = oq->buffer_size; data_len -= oq->buffer_size; } skb_add_rx_frag(skb, shinfo->nr_frags, buff_info->page, 0, buff_info->len, buff_info->len); buff_info->page = NULL; read_idx++; desc_used++; if (read_idx == oq->max_count) read_idx = 0; } } skb->dev = oq->netdev; skb->protocol = eth_type_trans(skb, skb->dev); if (resp_hw_ext && resp_hw_ext->csum_verified == OCTEP_CSUM_VERIFIED) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; napi_gro_receive(oq->napi, skb); } oq->host_read_idx = read_idx; oq->refill_count += desc_used; oq->stats.packets += pkt; oq->stats.bytes += rx_bytes; return pkt; } /** * octep_oq_process_rx() - Process Rx queue. * * @oq: Octeon Rx queue data structure. * @budget: max number of packets can be processed in one invocation. * * Check for newly received packets and process them. * Keeps checking for new packets until budget is used or no new packets seen. * * Return: number of packets processed. */ int octep_oq_process_rx(struct octep_oq *oq, int budget) { u32 pkts_available, pkts_processed, total_pkts_processed; struct octep_device *oct = oq->octep_dev; pkts_available = 0; pkts_processed = 0; total_pkts_processed = 0; while (total_pkts_processed < budget) { /* update pending count only when current one exhausted */ if (oq->pkts_pending == 0) octep_oq_check_hw_for_pkts(oct, oq); pkts_available = min(budget - total_pkts_processed, oq->pkts_pending); if (!pkts_available) break; pkts_processed = __octep_oq_process_rx(oct, oq, pkts_available); oq->pkts_pending -= pkts_processed; total_pkts_processed += pkts_processed; } if (oq->refill_count >= oq->refill_threshold) { u32 desc_refilled = octep_oq_refill(oct, oq); /* flush pending writes before updating credits */ wmb(); writel(desc_refilled, oq->pkts_credit_reg); } return total_pkts_processed; }
linux-master
drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
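octep_oq_check_hw_for_pkts() above treats the hardware pkts_sent register as a cumulative counter: new work is the unsigned delta from the last observed value, and the register is only written back near wrap-around. The sketch below models that bookkeeping in plain userspace C; the demo_oq struct and the variable standing in for the register are assumptions for illustration, and the write-back is simplified to a plain reset.

/* Userspace model of cumulative-counter bookkeeping similar to
 * octep_oq_check_hw_for_pkts(); the "register" is just a variable here.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_oq {
	uint32_t hw_pkts_sent;   /* stands in for readl(oq->pkts_sent_reg) */
	uint32_t last_pkt_count; /* last value observed by the driver */
	uint32_t pkts_pending;   /* packets known but not yet processed */
};

static uint32_t demo_check_hw_for_pkts(struct demo_oq *oq)
{
	uint32_t pkt_count = oq->hw_pkts_sent;
	uint32_t new_pkts = pkt_count - oq->last_pkt_count; /* wraps safely */

	/* Lazily reset the counter when it gets close to wrap-around,
	 * mirroring the 0xF0000000 threshold used by the driver; the real
	 * write-back also re-reads the register to pick up new arrivals.
	 */
	if (pkt_count > 0xF0000000U) {
		oq->hw_pkts_sent = 0;         /* simplified clear */
		pkt_count = oq->hw_pkts_sent;
		new_pkts += pkt_count;
	}
	oq->last_pkt_count = pkt_count;
	oq->pkts_pending += new_pkts;
	return new_pkts;
}

int main(void)
{
	struct demo_oq oq = { .hw_pkts_sent = 10, .last_pkt_count = 0 };

	printf("new: %u\n", demo_check_hw_for_pkts(&oq)); /* 10 */
	oq.hw_pkts_sent += 7;
	printf("new: %u\n", demo_check_hw_for_pkts(&oq)); /* 7 */
	printf("pending: %u\n", oq.pkts_pending);          /* 17 */
	return 0;
}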
// SPDX-License-Identifier: GPL-2.0 /* Marvell Octeon EP (EndPoint) Ethernet Driver * * Copyright (C) 2020 Marvell. * */ #include <linux/pci.h> #include <linux/etherdevice.h> #include <linux/vmalloc.h> #include "octep_config.h" #include "octep_main.h" /* Reset various index of Tx queue data structure. */ static void octep_iq_reset_indices(struct octep_iq *iq) { iq->fill_cnt = 0; iq->host_write_index = 0; iq->octep_read_index = 0; iq->flush_index = 0; iq->pkts_processed = 0; iq->pkt_in_done = 0; atomic_set(&iq->instr_pending, 0); } /** * octep_iq_process_completions() - Process Tx queue completions. * * @iq: Octeon Tx queue data structure. * @budget: max number of completions to be processed in one invocation. */ int octep_iq_process_completions(struct octep_iq *iq, u16 budget) { u32 compl_pkts, compl_bytes, compl_sg; struct octep_device *oct = iq->octep_dev; struct octep_tx_buffer *tx_buffer; struct skb_shared_info *shinfo; u32 fi = iq->flush_index; struct sk_buff *skb; u8 frags, i; compl_pkts = 0; compl_sg = 0; compl_bytes = 0; iq->octep_read_index = oct->hw_ops.update_iq_read_idx(iq); while (likely(budget && (fi != iq->octep_read_index))) { tx_buffer = iq->buff_info + fi; skb = tx_buffer->skb; fi++; if (unlikely(fi == iq->max_count)) fi = 0; compl_bytes += skb->len; compl_pkts++; budget--; if (!tx_buffer->gather) { dma_unmap_single(iq->dev, tx_buffer->dma, tx_buffer->skb->len, DMA_TO_DEVICE); dev_kfree_skb_any(skb); continue; } /* Scatter/Gather */ shinfo = skb_shinfo(skb); frags = shinfo->nr_frags; compl_sg++; dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0], tx_buffer->sglist[0].len[3], DMA_TO_DEVICE); i = 1; /* entry 0 is main skb, unmapped above */ while (frags--) { dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3], tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE); i++; } dev_kfree_skb_any(skb); } iq->pkts_processed += compl_pkts; atomic_sub(compl_pkts, &iq->instr_pending); iq->stats.instr_completed += compl_pkts; iq->stats.bytes_sent += compl_bytes; iq->stats.sgentry_sent += compl_sg; iq->flush_index = fi; netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes); if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) && ((iq->max_count - atomic_read(&iq->instr_pending)) > OCTEP_WAKE_QUEUE_THRESHOLD)) netif_wake_subqueue(iq->netdev, iq->q_no); return !budget; } /** * octep_iq_free_pending() - Free Tx buffers for pending completions. * * @iq: Octeon Tx queue data structure. */ static void octep_iq_free_pending(struct octep_iq *iq) { struct octep_tx_buffer *tx_buffer; struct skb_shared_info *shinfo; u32 fi = iq->flush_index; struct sk_buff *skb; u8 frags, i; while (fi != iq->host_write_index) { tx_buffer = iq->buff_info + fi; skb = tx_buffer->skb; fi++; if (unlikely(fi == iq->max_count)) fi = 0; if (!tx_buffer->gather) { dma_unmap_single(iq->dev, tx_buffer->dma, tx_buffer->skb->len, DMA_TO_DEVICE); dev_kfree_skb_any(skb); continue; } /* Scatter/Gather */ shinfo = skb_shinfo(skb); frags = shinfo->nr_frags; dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0], tx_buffer->sglist[0].len[3], DMA_TO_DEVICE); i = 1; /* entry 0 is main skb, unmapped above */ while (frags--) { dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3], tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE); i++; } dev_kfree_skb_any(skb); } atomic_set(&iq->instr_pending, 0); iq->flush_index = fi; netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no)); } /** * octep_clean_iqs() - Clean Tx queues to shutdown the device. 
* * @oct: Octeon device private data structure. * * Free the buffers in Tx queue descriptors pending completion and * reset queue indices */ void octep_clean_iqs(struct octep_device *oct) { int i; for (i = 0; i < oct->num_iqs; i++) { octep_iq_free_pending(oct->iq[i]); octep_iq_reset_indices(oct->iq[i]); } } /** * octep_setup_iq() - Setup a Tx queue. * * @oct: Octeon device private data structure. * @q_no: Tx queue number to be setup. * * Allocate resources for a Tx queue. */ static int octep_setup_iq(struct octep_device *oct, int q_no) { u32 desc_ring_size, buff_info_size, sglist_size; struct octep_iq *iq; int i; iq = vzalloc(sizeof(*iq)); if (!iq) goto iq_alloc_err; oct->iq[q_no] = iq; iq->octep_dev = oct; iq->netdev = oct->netdev; iq->dev = &oct->pdev->dev; iq->q_no = q_no; iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf); iq->ring_size_mask = iq->max_count - 1; iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf); iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no); /* Allocate memory for hardware queue descriptors */ desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf); iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size, &iq->desc_ring_dma, GFP_KERNEL); if (unlikely(!iq->desc_ring)) { dev_err(iq->dev, "Failed to allocate DMA memory for IQ-%d\n", q_no); goto desc_dma_alloc_err; } /* Allocate memory for hardware SGLIST descriptors */ sglist_size = OCTEP_SGLIST_SIZE_PER_PKT * CFG_GET_IQ_NUM_DESC(oct->conf); iq->sglist = dma_alloc_coherent(iq->dev, sglist_size, &iq->sglist_dma, GFP_KERNEL); if (unlikely(!iq->sglist)) { dev_err(iq->dev, "Failed to allocate DMA memory for IQ-%d SGLIST\n", q_no); goto sglist_alloc_err; } /* allocate memory to manage Tx packets pending completion */ buff_info_size = OCTEP_IQ_TXBUFF_INFO_SIZE * iq->max_count; iq->buff_info = vzalloc(buff_info_size); if (!iq->buff_info) { dev_err(iq->dev, "Failed to allocate buff info for IQ-%d\n", q_no); goto buff_info_err; } /* Setup sglist addresses in tx_buffer entries */ for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) { struct octep_tx_buffer *tx_buffer; tx_buffer = &iq->buff_info[i]; tx_buffer->sglist = &iq->sglist[i * OCTEP_SGLIST_ENTRIES_PER_PKT]; tx_buffer->sglist_dma = iq->sglist_dma + (i * OCTEP_SGLIST_SIZE_PER_PKT); } octep_iq_reset_indices(iq); oct->hw_ops.setup_iq_regs(oct, q_no); oct->num_iqs++; return 0; buff_info_err: dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma); sglist_alloc_err: dma_free_coherent(iq->dev, desc_ring_size, iq->desc_ring, iq->desc_ring_dma); desc_dma_alloc_err: vfree(iq); oct->iq[q_no] = NULL; iq_alloc_err: return -1; } /** * octep_free_iq() - Free Tx queue resources. * * @iq: Octeon Tx queue data structure. * * Free all the resources allocated for a Tx queue. */ static void octep_free_iq(struct octep_iq *iq) { struct octep_device *oct = iq->octep_dev; u64 desc_ring_size, sglist_size; int q_no = iq->q_no; desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf); vfree(iq->buff_info); if (iq->desc_ring) dma_free_coherent(iq->dev, desc_ring_size, iq->desc_ring, iq->desc_ring_dma); sglist_size = OCTEP_SGLIST_SIZE_PER_PKT * CFG_GET_IQ_NUM_DESC(oct->conf); if (iq->sglist) dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma); vfree(iq); oct->iq[q_no] = NULL; oct->num_iqs--; } /** * octep_setup_iqs() - setup resources for all Tx queues. * * @oct: Octeon device private data structure. 
*/ int octep_setup_iqs(struct octep_device *oct) { int i; oct->num_iqs = 0; for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { if (octep_setup_iq(oct, i)) { dev_err(&oct->pdev->dev, "Failed to setup IQ(TxQ)-%d.\n", i); goto iq_setup_err; } dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i); } return 0; iq_setup_err: while (i) { i--; octep_free_iq(oct->iq[i]); } return -1; } /** * octep_free_iqs() - Free resources of all Tx queues. * * @oct: Octeon device private data structure. */ void octep_free_iqs(struct octep_device *oct) { int i; for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { octep_free_iq(oct->iq[i]); dev_dbg(&oct->pdev->dev, "Successfully destroyed IQ(TxQ)-%d.\n", i); } oct->num_iqs = 0; }
linux-master
drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
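Both octep_iq_full_check() (in octep_main.c) and octep_iq_process_completions() above gate the Tx subqueue on the same headroom test: max_count minus instr_pending compared against OCTEP_WAKE_QUEUE_THRESHOLD. The standalone sketch below reproduces that stop/wake hysteresis with plain integers; the demo_* names, the threshold value of 32 and the bool standing in for netif subqueue state are illustrative assumptions, and the driver's re-check after stopping (to close the race with NAPI completions) is not modeled.

/* Model of the Tx-queue stop/wake headroom test used by octep_iq_full_check()
 * and octep_iq_process_completions(); plain ints stand in for atomics.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_WAKE_QUEUE_THRESHOLD 32   /* stands in for OCTEP_WAKE_QUEUE_THRESHOLD */

struct demo_iq {
	unsigned int max_count;      /* ring size */
	unsigned int instr_pending;  /* descriptors posted, not yet completed */
	bool stopped;                /* models netif_tx_stop/start_subqueue */
};

/* Return true (queue full) when headroom drops below the threshold. */
static bool demo_iq_full_check(struct demo_iq *iq)
{
	if (iq->max_count - iq->instr_pending >= DEMO_WAKE_QUEUE_THRESHOLD)
		return false;
	iq->stopped = true;
	return true;
}

/* Completion path: retire 'done' descriptors, wake the queue once headroom
 * rises back above the threshold (same test, opposite transition).
 */
static void demo_iq_complete(struct demo_iq *iq, unsigned int done)
{
	iq->instr_pending -= done;
	if (iq->stopped &&
	    iq->max_count - iq->instr_pending > DEMO_WAKE_QUEUE_THRESHOLD)
		iq->stopped = false;
}

int main(void)
{
	struct demo_iq iq = { .max_count = 1024, .instr_pending = 1000 };

	printf("full: %d\n", demo_iq_full_check(&iq));          /* 1: 24 free */
	demo_iq_complete(&iq, 100);                              /* 124 free   */
	printf("stopped after completions: %d\n", iq.stopped);  /* 0 */
	return 0;
}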
// SPDX-License-Identifier: GPL-2.0 /* Marvell Octeon EP (EndPoint) Ethernet Driver * * Copyright (C) 2020 Marvell. * */ #include <linux/string.h> #include <linux/types.h> #include <linux/etherdevice.h> #include <linux/pci.h> #include <linux/wait.h> #include "octep_config.h" #include "octep_main.h" #include "octep_ctrl_net.h" /* Control plane version */ #define OCTEP_CP_VERSION_CURRENT OCTEP_CP_VERSION(1, 0, 0) static const u32 req_hdr_sz = sizeof(union octep_ctrl_net_req_hdr); static const u32 mtu_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mtu); static const u32 mac_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mac); static const u32 state_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_state); static const u32 link_info_sz = sizeof(struct octep_ctrl_net_link_info); static atomic_t ctrl_net_msg_id; /* Control plane version in which OCTEP_CTRL_NET_H2F_CMD was added */ static const u32 octep_ctrl_net_h2f_cmd_versions[OCTEP_CTRL_NET_H2F_CMD_MAX] = { [OCTEP_CTRL_NET_H2F_CMD_INVALID ... OCTEP_CTRL_NET_H2F_CMD_LINK_INFO] = OCTEP_CP_VERSION(1, 0, 0) }; /* Control plane version in which OCTEP_CTRL_NET_F2H_CMD was added */ static const u32 octep_ctrl_net_f2h_cmd_versions[OCTEP_CTRL_NET_F2H_CMD_MAX] = { [OCTEP_CTRL_NET_F2H_CMD_INVALID ... OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS] = OCTEP_CP_VERSION(1, 0, 0) }; static void init_send_req(struct octep_ctrl_mbox_msg *msg, void *buf, u16 sz, int vfid) { msg->hdr.s.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; msg->hdr.s.msg_id = atomic_inc_return(&ctrl_net_msg_id) & GENMASK(sizeof(msg->hdr.s.msg_id) * BITS_PER_BYTE, 0); msg->hdr.s.sz = req_hdr_sz + sz; msg->sg_num = 1; msg->sg_list[0].msg = buf; msg->sg_list[0].sz = msg->hdr.s.sz; if (vfid != OCTEP_CTRL_NET_INVALID_VFID) { msg->hdr.s.is_vf = 1; msg->hdr.s.vf_idx = vfid; } } static int octep_send_mbox_req(struct octep_device *oct, struct octep_ctrl_net_wait_data *d, bool wait_for_response) { int err, ret, cmd; /* check if firmware is compatible for this request */ cmd = d->data.req.hdr.s.cmd; if (octep_ctrl_net_h2f_cmd_versions[cmd] > oct->ctrl_mbox.max_fw_version || octep_ctrl_net_h2f_cmd_versions[cmd] < oct->ctrl_mbox.min_fw_version) return -EOPNOTSUPP; err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &d->msg); if (err < 0) return err; if (!wait_for_response) return 0; d->done = 0; INIT_LIST_HEAD(&d->list); list_add_tail(&d->list, &oct->ctrl_req_wait_list); ret = wait_event_interruptible_timeout(oct->ctrl_req_wait_q, (d->done != 0), msecs_to_jiffies(500)); list_del(&d->list); if (ret == 0 || ret == 1) return -EAGAIN; /** * (ret == 0) cond = false && timeout, return 0 * (ret < 0) interrupted by signal, return 0 * (ret == 1) cond = true && timeout, return 1 * (ret >= 1) cond = true && !timeout, return 1 */ if (d->data.resp.hdr.s.reply != OCTEP_CTRL_NET_REPLY_OK) return -EAGAIN; return 0; } int octep_ctrl_net_init(struct octep_device *oct) { struct octep_ctrl_mbox *ctrl_mbox; struct pci_dev *pdev = oct->pdev; int ret; init_waitqueue_head(&oct->ctrl_req_wait_q); INIT_LIST_HEAD(&oct->ctrl_req_wait_list); /* Initialize control mbox */ ctrl_mbox = &oct->ctrl_mbox; ctrl_mbox->version = OCTEP_CP_VERSION_CURRENT; ctrl_mbox->barmem = CFG_GET_CTRL_MBOX_MEM_ADDR(oct->conf); ret = octep_ctrl_mbox_init(ctrl_mbox); if (ret) { dev_err(&pdev->dev, "Failed to initialize control mbox\n"); return ret; } dev_info(&pdev->dev, "Control plane versions host: %llx, firmware: %x:%x\n", ctrl_mbox->version, ctrl_mbox->min_fw_version, ctrl_mbox->max_fw_version); oct->ctrl_mbox_ifstats_offset = ctrl_mbox->barmem_sz; return 0; } int 
octep_ctrl_net_get_link_status(struct octep_device *oct, int vfid) { struct octep_ctrl_net_wait_data d = {0}; struct octep_ctrl_net_h2f_req *req = &d.data.req; int err; init_send_req(&d.msg, (void *)req, state_sz, vfid); req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS; req->link.cmd = OCTEP_CTRL_NET_CMD_GET; err = octep_send_mbox_req(oct, &d, true); if (err < 0) return err; return d.data.resp.link.state; } int octep_ctrl_net_set_link_status(struct octep_device *oct, int vfid, bool up, bool wait_for_response) { struct octep_ctrl_net_wait_data d = {0}; struct octep_ctrl_net_h2f_req *req = &d.data.req; init_send_req(&d.msg, req, state_sz, vfid); req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS; req->link.cmd = OCTEP_CTRL_NET_CMD_SET; req->link.state = (up) ? OCTEP_CTRL_NET_STATE_UP : OCTEP_CTRL_NET_STATE_DOWN; return octep_send_mbox_req(oct, &d, wait_for_response); } int octep_ctrl_net_set_rx_state(struct octep_device *oct, int vfid, bool up, bool wait_for_response) { struct octep_ctrl_net_wait_data d = {0}; struct octep_ctrl_net_h2f_req *req = &d.data.req; init_send_req(&d.msg, req, state_sz, vfid); req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_RX_STATE; req->link.cmd = OCTEP_CTRL_NET_CMD_SET; req->link.state = (up) ? OCTEP_CTRL_NET_STATE_UP : OCTEP_CTRL_NET_STATE_DOWN; return octep_send_mbox_req(oct, &d, wait_for_response); } int octep_ctrl_net_get_mac_addr(struct octep_device *oct, int vfid, u8 *addr) { struct octep_ctrl_net_wait_data d = {0}; struct octep_ctrl_net_h2f_req *req = &d.data.req; int err; init_send_req(&d.msg, req, mac_sz, vfid); req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC; req->link.cmd = OCTEP_CTRL_NET_CMD_GET; err = octep_send_mbox_req(oct, &d, true); if (err < 0) return err; memcpy(addr, d.data.resp.mac.addr, ETH_ALEN); return 0; } int octep_ctrl_net_set_mac_addr(struct octep_device *oct, int vfid, u8 *addr, bool wait_for_response) { struct octep_ctrl_net_wait_data d = {0}; struct octep_ctrl_net_h2f_req *req = &d.data.req; init_send_req(&d.msg, req, mac_sz, vfid); req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC; req->mac.cmd = OCTEP_CTRL_NET_CMD_SET; memcpy(&req->mac.addr, addr, ETH_ALEN); return octep_send_mbox_req(oct, &d, wait_for_response); } int octep_ctrl_net_set_mtu(struct octep_device *oct, int vfid, int mtu, bool wait_for_response) { struct octep_ctrl_net_wait_data d = {0}; struct octep_ctrl_net_h2f_req *req = &d.data.req; init_send_req(&d.msg, req, mtu_sz, vfid); req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_MTU; req->mtu.cmd = OCTEP_CTRL_NET_CMD_SET; req->mtu.val = mtu; return octep_send_mbox_req(oct, &d, wait_for_response); } int octep_ctrl_net_get_if_stats(struct octep_device *oct, int vfid, struct octep_iface_rx_stats *rx_stats, struct octep_iface_tx_stats *tx_stats) { struct octep_ctrl_net_wait_data d = {0}; struct octep_ctrl_net_h2f_req *req = &d.data.req; struct octep_ctrl_net_h2f_resp *resp; int err; init_send_req(&d.msg, req, 0, vfid); req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS; err = octep_send_mbox_req(oct, &d, true); if (err < 0) return err; resp = &d.data.resp; memcpy(rx_stats, &resp->if_stats.rx_stats, sizeof(struct octep_iface_rx_stats)); memcpy(tx_stats, &resp->if_stats.tx_stats, sizeof(struct octep_iface_tx_stats)); return 0; } int octep_ctrl_net_get_link_info(struct octep_device *oct, int vfid, struct octep_iface_link_info *link_info) { struct octep_ctrl_net_wait_data d = {0}; struct octep_ctrl_net_h2f_req *req = &d.data.req; struct octep_ctrl_net_h2f_resp *resp; int err; init_send_req(&d.msg, req, link_info_sz, vfid); req->hdr.s.cmd = 
OCTEP_CTRL_NET_H2F_CMD_LINK_INFO; req->link_info.cmd = OCTEP_CTRL_NET_CMD_GET; err = octep_send_mbox_req(oct, &d, true); if (err < 0) return err; resp = &d.data.resp; link_info->supported_modes = resp->link_info.supported_modes; link_info->advertised_modes = resp->link_info.advertised_modes; link_info->autoneg = resp->link_info.autoneg; link_info->pause = resp->link_info.pause; link_info->speed = resp->link_info.speed; return 0; } int octep_ctrl_net_set_link_info(struct octep_device *oct, int vfid, struct octep_iface_link_info *link_info, bool wait_for_response) { struct octep_ctrl_net_wait_data d = {0}; struct octep_ctrl_net_h2f_req *req = &d.data.req; init_send_req(&d.msg, req, link_info_sz, vfid); req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO; req->link_info.cmd = OCTEP_CTRL_NET_CMD_SET; req->link_info.info.advertised_modes = link_info->advertised_modes; req->link_info.info.autoneg = link_info->autoneg; req->link_info.info.pause = link_info->pause; req->link_info.info.speed = link_info->speed; return octep_send_mbox_req(oct, &d, wait_for_response); } static void process_mbox_resp(struct octep_device *oct, struct octep_ctrl_mbox_msg *msg) { struct octep_ctrl_net_wait_data *pos, *n; list_for_each_entry_safe(pos, n, &oct->ctrl_req_wait_list, list) { if (pos->msg.hdr.s.msg_id == msg->hdr.s.msg_id) { memcpy(&pos->data.resp, msg->sg_list[0].msg, msg->hdr.s.sz); pos->done = 1; wake_up_interruptible_all(&oct->ctrl_req_wait_q); break; } } } static int process_mbox_notify(struct octep_device *oct, struct octep_ctrl_mbox_msg *msg) { struct net_device *netdev = oct->netdev; struct octep_ctrl_net_f2h_req *req; int cmd; req = (struct octep_ctrl_net_f2h_req *)msg->sg_list[0].msg; cmd = req->hdr.s.cmd; /* check if we support this command */ if (octep_ctrl_net_f2h_cmd_versions[cmd] > OCTEP_CP_VERSION_CURRENT || octep_ctrl_net_f2h_cmd_versions[cmd] < OCTEP_CP_VERSION_CURRENT) return -EOPNOTSUPP; switch (cmd) { case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS: if (netif_running(netdev)) { if (req->link.state) { dev_info(&oct->pdev->dev, "netif_carrier_on\n"); netif_carrier_on(netdev); } else { dev_info(&oct->pdev->dev, "netif_carrier_off\n"); netif_carrier_off(netdev); } } break; default: pr_info("Unknown mbox req : %u\n", req->hdr.s.cmd); break; } return 0; } void octep_ctrl_net_recv_fw_messages(struct octep_device *oct) { static u16 msg_sz = sizeof(union octep_ctrl_net_max_data); union octep_ctrl_net_max_data data = {0}; struct octep_ctrl_mbox_msg msg = {0}; int ret; msg.hdr.s.sz = msg_sz; msg.sg_num = 1; msg.sg_list[0].sz = msg_sz; msg.sg_list[0].msg = &data; while (true) { /* mbox will overwrite msg.hdr.s.sz so initialize it */ msg.hdr.s.sz = msg_sz; ret = octep_ctrl_mbox_recv(&oct->ctrl_mbox, (struct octep_ctrl_mbox_msg *)&msg); if (ret < 0) break; if (msg.hdr.s.flags & OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP) process_mbox_resp(oct, &msg); else if (msg.hdr.s.flags & OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY) process_mbox_notify(oct, &msg); } } int octep_ctrl_net_uninit(struct octep_device *oct) { struct octep_ctrl_net_wait_data *pos, *n; list_for_each_entry_safe(pos, n, &oct->ctrl_req_wait_list, list) pos->done = 1; wake_up_interruptible_all(&oct->ctrl_req_wait_q); octep_ctrl_mbox_uninit(&oct->ctrl_mbox); return 0; }
linux-master
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
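octep_send_mbox_req() above refuses to send a command whose introduction version falls outside the firmware's advertised [min_fw_version, max_fw_version] window and returns -EOPNOTSUPP. The sketch below shows that gating pattern in isolation; the DEMO_CP_VERSION encoding, the command enum and the table contents are made-up stand-ins, not the real control-plane ABI.

/* Sketch of the command/version gating done by octep_send_mbox_req();
 * the version encoding and table below are illustrative, not the real ABI.
 */
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

#define DEMO_CP_VERSION(maj, min, rev) \
	(((uint32_t)(maj) << 16) | ((uint32_t)(min) << 8) | (uint32_t)(rev))

enum demo_cmd { DEMO_CMD_MTU, DEMO_CMD_MAC, DEMO_CMD_LINK_INFO, DEMO_CMD_MAX };

/* Control-plane version in which each command was added (illustrative). */
static const uint32_t demo_cmd_versions[DEMO_CMD_MAX] = {
	[DEMO_CMD_MTU]       = DEMO_CP_VERSION(1, 0, 0),
	[DEMO_CMD_MAC]       = DEMO_CP_VERSION(1, 0, 0),
	[DEMO_CMD_LINK_INFO] = DEMO_CP_VERSION(1, 1, 0),
};

/* Reject a request the connected firmware cannot understand. */
static int demo_check_fw_support(enum demo_cmd cmd,
				 uint32_t fw_min, uint32_t fw_max)
{
	if (demo_cmd_versions[cmd] > fw_max || demo_cmd_versions[cmd] < fw_min)
		return -EOPNOTSUPP;
	return 0;
}

int main(void)
{
	uint32_t fw_min = DEMO_CP_VERSION(1, 0, 0);
	uint32_t fw_max = DEMO_CP_VERSION(1, 0, 5);

	printf("MTU cmd: %d\n",
	       demo_check_fw_support(DEMO_CMD_MTU, fw_min, fw_max));        /* 0 */
	printf("LINK_INFO cmd: %d\n",
	       demo_check_fw_support(DEMO_CMD_LINK_INFO, fw_min, fw_max));  /* -EOPNOTSUPP */
	return 0;
}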
// SPDX-License-Identifier: GPL-2.0 /* Marvell Octeon EP (EndPoint) Ethernet Driver * * Copyright (C) 2020 Marvell. * */ #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include "octep_config.h" #include "octep_main.h" #include "octep_regs_cn9k_pf.h" #define CTRL_MBOX_MAX_PF 128 #define CTRL_MBOX_SZ ((size_t)(0x400000 / CTRL_MBOX_MAX_PF)) #define FW_HB_INTERVAL_IN_SECS 1 #define FW_HB_MISS_COUNT 10 /* Names of Hardware non-queue generic interrupts */ static char *cn93_non_ioq_msix_names[] = { "epf_ire_rint", "epf_ore_rint", "epf_vfire_rint0", "epf_vfire_rint1", "epf_vfore_rint0", "epf_vfore_rint1", "epf_mbox_rint0", "epf_mbox_rint1", "epf_oei_rint", "epf_dma_rint", "epf_dma_vf_rint0", "epf_dma_vf_rint1", "epf_pp_vf_rint0", "epf_pp_vf_rint1", "epf_misc_rint", "epf_rsvd", }; /* Dump useful hardware CSRs for debug purpose */ static void cn93_dump_regs(struct octep_device *oct, int qno) { struct device *dev = &oct->pdev->dev; dev_info(dev, "IQ-%d register dump\n", qno); dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n", qno, CN93_SDP_R_IN_INSTR_DBELL(qno), octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(qno))); dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n", qno, CN93_SDP_R_IN_CONTROL(qno), octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(qno))); dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n", qno, CN93_SDP_R_IN_ENABLE(qno), octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(qno))); dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n", qno, CN93_SDP_R_IN_INSTR_BADDR(qno), octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(qno))); dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n", qno, CN93_SDP_R_IN_INSTR_RSIZE(qno), octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(qno))); dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n", qno, CN93_SDP_R_IN_CNTS(qno), octep_read_csr64(oct, CN93_SDP_R_IN_CNTS(qno))); dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n", qno, CN93_SDP_R_IN_INT_LEVELS(qno), octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(qno))); dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n", qno, CN93_SDP_R_IN_PKT_CNT(qno), octep_read_csr64(oct, CN93_SDP_R_IN_PKT_CNT(qno))); dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n", qno, CN93_SDP_R_IN_BYTE_CNT(qno), octep_read_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(qno))); dev_info(dev, "OQ-%d register dump\n", qno); dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n", qno, CN93_SDP_R_OUT_SLIST_DBELL(qno), octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(qno))); dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n", qno, CN93_SDP_R_OUT_CONTROL(qno), octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(qno))); dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n", qno, CN93_SDP_R_OUT_ENABLE(qno), octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(qno))); dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n", qno, CN93_SDP_R_OUT_SLIST_BADDR(qno), octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(qno))); dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n", qno, CN93_SDP_R_OUT_SLIST_RSIZE(qno), octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(qno))); dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n", qno, CN93_SDP_R_OUT_CNTS(qno), octep_read_csr64(oct, CN93_SDP_R_OUT_CNTS(qno))); dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n", qno, CN93_SDP_R_OUT_INT_LEVELS(qno), octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(qno))); dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n", qno, CN93_SDP_R_OUT_PKT_CNT(qno), octep_read_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(qno))); dev_info(dev, 
"R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n", qno, CN93_SDP_R_OUT_BYTE_CNT(qno), octep_read_csr64(oct, CN93_SDP_R_OUT_BYTE_CNT(qno))); dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n", qno, CN93_SDP_R_ERR_TYPE(qno), octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(qno))); } /* Reset Hardware Tx queue */ static int cn93_reset_iq(struct octep_device *oct, int q_no) { struct octep_config *conf = oct->conf; u64 val = 0ULL; dev_dbg(&oct->pdev->dev, "Reset PF IQ-%d\n", q_no); /* Get absolute queue number */ q_no += conf->pf_ring_cfg.srn; /* Disable the Tx/Instruction Ring */ octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(q_no), val); /* clear the Instruction Ring packet/byte counts and doorbell CSRs */ octep_write_csr64(oct, CN93_SDP_R_IN_CNTS(q_no), val); octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(q_no), val); octep_write_csr64(oct, CN93_SDP_R_IN_PKT_CNT(q_no), val); octep_write_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(q_no), val); octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(q_no), val); octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(q_no), val); val = 0xFFFFFFFF; octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(q_no), val); return 0; } /* Reset Hardware Rx queue */ static void cn93_reset_oq(struct octep_device *oct, int q_no) { u64 val = 0ULL; q_no += CFG_GET_PORTS_PF_SRN(oct->conf); /* Disable Output (Rx) Ring */ octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(q_no), val); /* Clear count CSRs */ val = octep_read_csr(oct, CN93_SDP_R_OUT_CNTS(q_no)); octep_write_csr(oct, CN93_SDP_R_OUT_CNTS(q_no), val); octep_write_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL); octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(q_no), 0xFFFFFFFF); } /* Reset all hardware Tx/Rx queues */ static void octep_reset_io_queues_cn93_pf(struct octep_device *oct) { struct pci_dev *pdev = oct->pdev; int q; dev_dbg(&pdev->dev, "Reset OCTEP_CN93 PF IO Queues\n"); for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { cn93_reset_iq(oct, q); cn93_reset_oq(oct, q); } } /* Initialize windowed addresses to access some hardware registers */ static void octep_setup_pci_window_regs_cn93_pf(struct octep_device *oct) { u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr; oct->pci_win_regs.pci_win_wr_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_ADDR64); oct->pci_win_regs.pci_win_rd_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_ADDR64); oct->pci_win_regs.pci_win_wr_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_DATA64); oct->pci_win_regs.pci_win_rd_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_DATA64); } /* Configure Hardware mapping: inform hardware which rings belong to PF. */ static void octep_configure_ring_mapping_cn93_pf(struct octep_device *oct) { struct octep_config *conf = oct->conf; struct pci_dev *pdev = oct->pdev; u64 pf_srn = CFG_GET_PORTS_PF_SRN(oct->conf); int q; for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(conf); q++) { u64 regval = 0; if (oct->pcie_port) regval = 8 << CN93_SDP_FUNC_SEL_EPF_BIT_POS; octep_write_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q), regval); regval = octep_read_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q)); dev_dbg(&pdev->dev, "Write SDP_EPVF_RING[0x%llx] = 0x%llx\n", CN93_SDP_EPVF_RING(pf_srn + q), regval); } } /* Initialize configuration limits and initial active config 93xx PF. 
*/ static void octep_init_config_cn93_pf(struct octep_device *oct) { struct octep_config *conf = oct->conf; struct pci_dev *pdev = oct->pdev; u8 link = 0; u64 val; int pos; /* Read ring configuration: * PF ring count, number of VFs and rings per VF supported */ val = octep_read_csr64(oct, CN93_SDP_EPF_RINFO); conf->sriov_cfg.max_rings_per_vf = CN93_SDP_EPF_RINFO_RPVF(val); conf->sriov_cfg.active_rings_per_vf = conf->sriov_cfg.max_rings_per_vf; conf->sriov_cfg.max_vfs = CN93_SDP_EPF_RINFO_NVFS(val); conf->sriov_cfg.active_vfs = conf->sriov_cfg.max_vfs; conf->sriov_cfg.vf_srn = CN93_SDP_EPF_RINFO_SRN(val); val = octep_read_csr64(oct, CN93_SDP_MAC_PF_RING_CTL(oct->pcie_port)); conf->pf_ring_cfg.srn = CN93_SDP_MAC_PF_RING_CTL_SRN(val); conf->pf_ring_cfg.max_io_rings = CN93_SDP_MAC_PF_RING_CTL_RPPF(val); conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings; dev_info(&pdev->dev, "pf_srn=%u rpvf=%u nvfs=%u rppf=%u\n", conf->pf_ring_cfg.srn, conf->sriov_cfg.active_rings_per_vf, conf->sriov_cfg.active_vfs, conf->pf_ring_cfg.active_io_rings); conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS; conf->iq.instr_type = OCTEP_64BYTE_INSTR; conf->iq.pkind = 0; conf->iq.db_min = OCTEP_DB_MIN; conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD; conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS; conf->oq.buf_size = OCTEP_OQ_BUF_SIZE; conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD; conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD; conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD; conf->msix_cfg.non_ioq_msix = CN93_NUM_NON_IOQ_INTR; conf->msix_cfg.ioq_msix = conf->pf_ring_cfg.active_io_rings; conf->msix_cfg.non_ioq_msix_names = cn93_non_ioq_msix_names; pos = pci_find_ext_capability(oct->pdev, PCI_EXT_CAP_ID_SRIOV); if (pos) { pci_read_config_byte(oct->pdev, pos + PCI_SRIOV_FUNC_LINK, &link); link = PCI_DEVFN(PCI_SLOT(oct->pdev->devfn), link); } conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr + (0x400000ull * 7) + (link * CTRL_MBOX_SZ); conf->hb_interval = FW_HB_INTERVAL_IN_SECS; conf->max_hb_miss_cnt = FW_HB_MISS_COUNT; } /* Setup registers for a hardware Tx Queue */ static void octep_setup_iq_regs_cn93_pf(struct octep_device *oct, int iq_no) { struct octep_iq *iq = oct->iq[iq_no]; u32 reset_instr_cnt; u64 reg_val; iq_no += CFG_GET_PORTS_PF_SRN(oct->conf); reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no)); /* wait for IDLE to set to 1 */ if (!(reg_val & CN93_R_IN_CTL_IDLE)) { do { reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no)); } while (!(reg_val & CN93_R_IN_CTL_IDLE)); } reg_val |= CN93_R_IN_CTL_RDSIZE; reg_val |= CN93_R_IN_CTL_IS_64B; reg_val |= CN93_R_IN_CTL_ESR; octep_write_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no), reg_val); /* Write the start of the input queue's ring and its size */ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma); octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count); /* Remember the doorbell & instruction count register addr * for this queue */ iq->doorbell_reg = oct->mmio[0].hw_addr + CN93_SDP_R_IN_INSTR_DBELL(iq_no); iq->inst_cnt_reg = oct->mmio[0].hw_addr + CN93_SDP_R_IN_CNTS(iq_no); iq->intr_lvl_reg = oct->mmio[0].hw_addr + CN93_SDP_R_IN_INT_LEVELS(iq_no); /* Store the current instruction counter (used in flush_iq calculation) */ reset_instr_cnt = readl(iq->inst_cnt_reg); writel(reset_instr_cnt, iq->inst_cnt_reg); /* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */ reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff; octep_write_csr64(oct, 
CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val); } /* Setup registers for a hardware Rx Queue */ static void octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no) { u64 reg_val; u64 oq_ctl = 0ULL; u32 time_threshold = 0; struct octep_oq *oq = oct->oq[oq_no]; oq_no += CFG_GET_PORTS_PF_SRN(oct->conf); reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no)); /* wait for IDLE to set to 1 */ if (!(reg_val & CN93_R_OUT_CTL_IDLE)) { do { reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no)); } while (!(reg_val & CN93_R_OUT_CTL_IDLE)); } reg_val &= ~(CN93_R_OUT_CTL_IMODE); reg_val &= ~(CN93_R_OUT_CTL_ROR_P); reg_val &= ~(CN93_R_OUT_CTL_NSR_P); reg_val &= ~(CN93_R_OUT_CTL_ROR_I); reg_val &= ~(CN93_R_OUT_CTL_NSR_I); reg_val &= ~(CN93_R_OUT_CTL_ES_I); reg_val &= ~(CN93_R_OUT_CTL_ROR_D); reg_val &= ~(CN93_R_OUT_CTL_NSR_D); reg_val &= ~(CN93_R_OUT_CTL_ES_D); reg_val |= (CN93_R_OUT_CTL_ES_P); octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), reg_val); octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma); octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count); oq_ctl = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no)); oq_ctl &= ~0x7fffffULL; //clear the ISIZE and BSIZE (22-0) oq_ctl |= (oq->buffer_size & 0xffff); //populate the BSIZE (15-0) octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), oq_ctl); /* Get the mapped address of the pkt_sent and pkts_credit regs */ oq->pkts_sent_reg = oct->mmio[0].hw_addr + CN93_SDP_R_OUT_CNTS(oq_no); oq->pkts_credit_reg = oct->mmio[0].hw_addr + CN93_SDP_R_OUT_SLIST_DBELL(oq_no); time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf); reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf); octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); } /* Setup registers for a PF mailbox */ static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no) { struct octep_mbox *mbox = oct->mbox[q_no]; mbox->q_no = q_no; /* PF mbox interrupt reg */ mbox->mbox_int_reg = oct->mmio[0].hw_addr + CN93_SDP_EPF_MBOX_RINT(0); /* PF to VF DATA reg. PF writes into this reg */ mbox->mbox_write_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_PF_VF_DATA(q_no); /* VF to PF DATA reg. PF reads from this reg */ mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no); } /* Process non-ioq interrupts required to keep pf interface running. * OEI_RINT is needed for control mailbox */ static bool octep_poll_non_ioq_interrupts_cn93_pf(struct octep_device *oct) { bool handled = false; u64 reg0; /* Check for OEI INTR */ reg0 = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT); if (reg0) { dev_info(&oct->pdev->dev, "Received OEI_RINT intr: 0x%llx\n", reg0); octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg0); if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX) queue_work(octep_wq, &oct->ctrl_mbox_task); else if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT) atomic_set(&oct->hb_miss_cnt, 0); handled = true; } return handled; } /* Interrupts handler for all non-queue generic interrupts. 
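 * Each pending cause register is acknowledged by writing back the value that was read from it.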
*/ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) { struct octep_device *oct = (struct octep_device *)dev; struct pci_dev *pdev = oct->pdev; u64 reg_val = 0; int i = 0; /* Check for IRERR INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_IRERR_RINT); if (reg_val) { dev_info(&pdev->dev, "received IRERR_RINT intr: 0x%llx\n", reg_val); octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT, reg_val); for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { reg_val = octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(i)); if (reg_val) { dev_info(&pdev->dev, "Received err type on IQ-%d: 0x%llx\n", i, reg_val); octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i), reg_val); } } goto irq_handled; } /* Check for ORERR INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_ORERR_RINT); if (reg_val) { dev_info(&pdev->dev, "Received ORERR_RINT intr: 0x%llx\n", reg_val); octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT, reg_val); for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { reg_val = octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(i)); if (reg_val) { dev_info(&pdev->dev, "Received err type on OQ-%d: 0x%llx\n", i, reg_val); octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i), reg_val); } } goto irq_handled; } /* Check for VFIRE INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0)); if (reg_val) { dev_info(&pdev->dev, "Received VFIRE_RINT intr: 0x%llx\n", reg_val); octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0), reg_val); goto irq_handled; } /* Check for VFORE INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0)); if (reg_val) { dev_info(&pdev->dev, "Received VFORE_RINT intr: 0x%llx\n", reg_val); octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0), reg_val); goto irq_handled; } /* Check for MBOX INTR and OEI INTR */ if (octep_poll_non_ioq_interrupts_cn93_pf(oct)) goto irq_handled; /* Check for DMA INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_RINT); if (reg_val) { octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT, reg_val); goto irq_handled; } /* Check for DMA VF INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0)); if (reg_val) { dev_info(&pdev->dev, "Received DMA_VF_RINT intr: 0x%llx\n", reg_val); octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0), reg_val); goto irq_handled; } /* Check for PPVF INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0)); if (reg_val) { dev_info(&pdev->dev, "Received PP_VF_RINT intr: 0x%llx\n", reg_val); octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0), reg_val); goto irq_handled; } /* Check for MISC INTR */ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MISC_RINT); if (reg_val) { dev_info(&pdev->dev, "Received MISC_RINT intr: 0x%llx\n", reg_val); octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT, reg_val); goto irq_handled; } dev_info(&pdev->dev, "Reserved interrupts raised; Ignore\n"); irq_handled: return IRQ_HANDLED; } /* Tx/Rx queue interrupt handler */ static irqreturn_t octep_ioq_intr_handler_cn93_pf(void *data) { struct octep_ioq_vector *vector = (struct octep_ioq_vector *)data; struct octep_oq *oq = vector->oq; napi_schedule_irqoff(oq->napi); return IRQ_HANDLED; } /* soft reset of 93xx */ static int octep_soft_reset_cn93_pf(struct octep_device *oct) { dev_info(&oct->pdev->dev, "CN93XX: Doing soft reset\n"); octep_write_csr64(oct, CN93_SDP_WIN_WR_MASK_REG, 0xFF); /* Set core domain reset bit */ OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1S, 1); /* Wait for 100ms as Octeon resets. 
*/ mdelay(100); /* clear core domain reset bit */ OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1C, 1); return 0; } /* Re-initialize Octeon hardware registers */ static void octep_reinit_regs_cn93_pf(struct octep_device *oct) { u32 i; for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) oct->hw_ops.setup_iq_regs(oct, i); for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) oct->hw_ops.setup_oq_regs(oct, i); oct->hw_ops.enable_interrupts(oct); oct->hw_ops.enable_io_queues(oct); for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg); } /* Enable all interrupts */ static void octep_enable_interrupts_cn93_pf(struct octep_device *oct) { u64 intr_mask = 0ULL; int srn, num_rings, i; srn = CFG_GET_PORTS_PF_SRN(oct->conf); num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); for (i = 0; i < num_rings; i++) intr_mask |= (0x1ULL << (srn + i)); octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask); octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask); octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL); octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask); octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask); } /* Disable all interrupts */ static void octep_disable_interrupts_cn93_pf(struct octep_device *oct) { u64 intr_mask = 0ULL; int srn, num_rings, i; srn = CFG_GET_PORTS_PF_SRN(oct->conf); num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); for (i = 0; i < num_rings; i++) intr_mask |= (0x1ULL << (srn + i)); octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask); octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask); octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL); octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask); octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask); } /* Get new Octeon Read Index: index of descriptor that Octeon reads next. 
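* The hardware instruction counter is free-running; the driver keeps the value from the last poll and uses the unsigned 32-bit difference as the number of newly consumed descriptors. For example, with a 64-entry ring, a counter that moved from 60 to 70 gives last_done = 10 and advances a read index of 60 to (60 + 10) % 64 = 6.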
*/ static u32 octep_update_iq_read_index_cn93_pf(struct octep_iq *iq) { u32 pkt_in_done = readl(iq->inst_cnt_reg); u32 last_done, new_idx; last_done = pkt_in_done - iq->pkt_in_done; iq->pkt_in_done = pkt_in_done; new_idx = (iq->octep_read_index + last_done) % iq->max_count; return new_idx; } /* Enable a hardware Tx Queue */ static void octep_enable_iq_cn93_pf(struct octep_device *oct, int iq_no) { u64 loop = HZ; u64 reg_val; iq_no += CFG_GET_PORTS_PF_SRN(oct->conf); octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF); while (octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no)) && loop--) { schedule_timeout_interruptible(1); } reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no)); reg_val |= (0x1ULL << 62); octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val); reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no)); reg_val |= 0x1ULL; octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val); } /* Enable a hardware Rx Queue */ static void octep_enable_oq_cn93_pf(struct octep_device *oct, int oq_no) { u64 reg_val = 0ULL; oq_no += CFG_GET_PORTS_PF_SRN(oct->conf); reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no)); reg_val |= (0x1ULL << 62); octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF); reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no)); reg_val |= 0x1ULL; octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val); } /* Enable all hardware Tx/Rx Queues assigned to PF */ static void octep_enable_io_queues_cn93_pf(struct octep_device *oct) { u8 q; for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { octep_enable_iq_cn93_pf(oct, q); octep_enable_oq_cn93_pf(oct, q); } } /* Disable a hardware Tx Queue assigned to PF */ static void octep_disable_iq_cn93_pf(struct octep_device *oct, int iq_no) { u64 reg_val = 0ULL; iq_no += CFG_GET_PORTS_PF_SRN(oct->conf); reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no)); reg_val &= ~0x1ULL; octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val); } /* Disable a hardware Rx Queue assigned to PF */ static void octep_disable_oq_cn93_pf(struct octep_device *oct, int oq_no) { u64 reg_val = 0ULL; oq_no += CFG_GET_PORTS_PF_SRN(oct->conf); reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no)); reg_val &= ~0x1ULL; octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val); } /* Disable all hardware Tx/Rx Queues assigned to PF */ static void octep_disable_io_queues_cn93_pf(struct octep_device *oct) { int q = 0; for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { octep_disable_iq_cn93_pf(oct, q); octep_disable_oq_cn93_pf(oct, q); } } /* Dump hardware registers (including Tx/Rx queues) for debugging. */ static void octep_dump_registers_cn93_pf(struct octep_device *oct) { u8 srn, num_rings, q; srn = CFG_GET_PORTS_PF_SRN(oct->conf); num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); for (q = srn; q < srn + num_rings; q++) cn93_dump_regs(oct, q); } /** * octep_device_setup_cn93_pf() - Setup Octeon device. * * @oct: Octeon device private data structure. * * - initialize hardware operations. * - get target side pcie port number for the device. * - setup window access to hardware registers. * - set initial configuration and max limits. * - setup hardware mapping of rings to the PF device.
*/ void octep_device_setup_cn93_pf(struct octep_device *oct) { oct->hw_ops.setup_iq_regs = octep_setup_iq_regs_cn93_pf; oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cn93_pf; oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cn93_pf; oct->hw_ops.non_ioq_intr_handler = octep_non_ioq_intr_handler_cn93_pf; oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cn93_pf; oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf; oct->hw_ops.reinit_regs = octep_reinit_regs_cn93_pf; oct->hw_ops.enable_interrupts = octep_enable_interrupts_cn93_pf; oct->hw_ops.disable_interrupts = octep_disable_interrupts_cn93_pf; oct->hw_ops.poll_non_ioq_interrupts = octep_poll_non_ioq_interrupts_cn93_pf; oct->hw_ops.update_iq_read_idx = octep_update_iq_read_index_cn93_pf; oct->hw_ops.enable_iq = octep_enable_iq_cn93_pf; oct->hw_ops.enable_oq = octep_enable_oq_cn93_pf; oct->hw_ops.enable_io_queues = octep_enable_io_queues_cn93_pf; oct->hw_ops.disable_iq = octep_disable_iq_cn93_pf; oct->hw_ops.disable_oq = octep_disable_oq_cn93_pf; oct->hw_ops.disable_io_queues = octep_disable_io_queues_cn93_pf; oct->hw_ops.reset_io_queues = octep_reset_io_queues_cn93_pf; oct->hw_ops.dump_registers = octep_dump_registers_cn93_pf; octep_setup_pci_window_regs_cn93_pf(oct); oct->pcie_port = octep_read_csr64(oct, CN93_SDP_MAC_NUMBER) & 0xff; dev_info(&oct->pdev->dev, "Octeon device using PCIE Port %d\n", oct->pcie_port); octep_init_config_cn93_pf(oct); octep_configure_ring_mapping_cn93_pf(oct); }
linux-master
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
// SPDX-License-Identifier: GPL-2.0 /* Marvell Octeon EP (EndPoint) Ethernet Driver * * Copyright (C) 2020 Marvell. * */ #include <linux/types.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mutex.h> #include <linux/jiffies.h> #include <linux/sched.h> #include <linux/sched/signal.h> #include <linux/io.h> #include <linux/pci.h> #include <linux/etherdevice.h> #include "octep_ctrl_mbox.h" #include "octep_config.h" #include "octep_main.h" /* Timeout in msecs for message response */ #define OCTEP_CTRL_MBOX_MSG_TIMEOUT_MS 100 /* Time in msecs to wait for message response */ #define OCTEP_CTRL_MBOX_MSG_WAIT_MS 10 /* Size of mbox info in bytes */ #define OCTEP_CTRL_MBOX_INFO_SZ 256 /* Size of mbox host to fw queue info in bytes */ #define OCTEP_CTRL_MBOX_H2FQ_INFO_SZ 16 /* Size of mbox fw to host queue info in bytes */ #define OCTEP_CTRL_MBOX_F2HQ_INFO_SZ 16 #define OCTEP_CTRL_MBOX_TOTAL_INFO_SZ (OCTEP_CTRL_MBOX_INFO_SZ + \ OCTEP_CTRL_MBOX_H2FQ_INFO_SZ + \ OCTEP_CTRL_MBOX_F2HQ_INFO_SZ) #define OCTEP_CTRL_MBOX_INFO_MAGIC_NUM(m) (m) #define OCTEP_CTRL_MBOX_INFO_BARMEM_SZ(m) ((m) + 8) #define OCTEP_CTRL_MBOX_INFO_HOST_VERSION(m) ((m) + 16) #define OCTEP_CTRL_MBOX_INFO_HOST_STATUS(m) ((m) + 24) #define OCTEP_CTRL_MBOX_INFO_FW_VERSION(m) ((m) + 136) #define OCTEP_CTRL_MBOX_INFO_FW_STATUS(m) ((m) + 144) #define OCTEP_CTRL_MBOX_H2FQ_INFO(m) ((m) + OCTEP_CTRL_MBOX_INFO_SZ) #define OCTEP_CTRL_MBOX_H2FQ_PROD(m) (OCTEP_CTRL_MBOX_H2FQ_INFO(m)) #define OCTEP_CTRL_MBOX_H2FQ_CONS(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO(m)) + 4) #define OCTEP_CTRL_MBOX_H2FQ_SZ(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO(m)) + 8) #define OCTEP_CTRL_MBOX_F2HQ_INFO(m) ((m) + \ OCTEP_CTRL_MBOX_INFO_SZ + \ OCTEP_CTRL_MBOX_H2FQ_INFO_SZ) #define OCTEP_CTRL_MBOX_F2HQ_PROD(m) (OCTEP_CTRL_MBOX_F2HQ_INFO(m)) #define OCTEP_CTRL_MBOX_F2HQ_CONS(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO(m)) + 4) #define OCTEP_CTRL_MBOX_F2HQ_SZ(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO(m)) + 8) static const u32 mbox_hdr_sz = sizeof(union octep_ctrl_mbox_msg_hdr); static u32 octep_ctrl_mbox_circq_inc(u32 index, u32 inc, u32 sz) { return (index + inc) % sz; } static u32 octep_ctrl_mbox_circq_space(u32 pi, u32 ci, u32 sz) { return sz - (abs(pi - ci) % sz); } static u32 octep_ctrl_mbox_circq_depth(u32 pi, u32 ci, u32 sz) { return (abs(pi - ci) % sz); } int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox) { u64 magic_num, status, fw_versions; if (!mbox) return -EINVAL; if (!mbox->barmem) { pr_info("octep_ctrl_mbox : Invalid barmem %p\n", mbox->barmem); return -EINVAL; } magic_num = readq(OCTEP_CTRL_MBOX_INFO_MAGIC_NUM(mbox->barmem)); if (magic_num != OCTEP_CTRL_MBOX_MAGIC_NUMBER) { pr_info("octep_ctrl_mbox : Invalid magic number %llx\n", magic_num); return -EINVAL; } status = readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS(mbox->barmem)); if (status != OCTEP_CTRL_MBOX_STATUS_READY) { pr_info("octep_ctrl_mbox : Firmware is not ready.\n"); return -EINVAL; } fw_versions = readq(OCTEP_CTRL_MBOX_INFO_FW_VERSION(mbox->barmem)); mbox->min_fw_version = ((fw_versions & 0xffffffff00000000ull) >> 32); mbox->max_fw_version = (fw_versions & 0xffffffff); mbox->barmem_sz = readl(OCTEP_CTRL_MBOX_INFO_BARMEM_SZ(mbox->barmem)); writeq(OCTEP_CTRL_MBOX_STATUS_INIT, OCTEP_CTRL_MBOX_INFO_HOST_STATUS(mbox->barmem)); mutex_init(&mbox->h2fq_lock); mutex_init(&mbox->f2hq_lock); mbox->h2fq.sz = readl(OCTEP_CTRL_MBOX_H2FQ_SZ(mbox->barmem)); mbox->h2fq.hw_prod = OCTEP_CTRL_MBOX_H2FQ_PROD(mbox->barmem); mbox->h2fq.hw_cons = OCTEP_CTRL_MBOX_H2FQ_CONS(mbox->barmem); mbox->h2fq.hw_q = mbox->barmem + OCTEP_CTRL_MBOX_TOTAL_INFO_SZ; 
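/* The two rings sit back to back in BAR memory right after the info area: h2fq starts at barmem + OCTEP_CTRL_MBOX_TOTAL_INFO_SZ (256 + 16 + 16 = 288 bytes of mbox/h2fq/f2hq info), and f2hq follows at the end of h2fq, as set up below. */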
mbox->f2hq.sz = readl(OCTEP_CTRL_MBOX_F2HQ_SZ(mbox->barmem)); mbox->f2hq.hw_prod = OCTEP_CTRL_MBOX_F2HQ_PROD(mbox->barmem); mbox->f2hq.hw_cons = OCTEP_CTRL_MBOX_F2HQ_CONS(mbox->barmem); mbox->f2hq.hw_q = mbox->barmem + OCTEP_CTRL_MBOX_TOTAL_INFO_SZ + mbox->h2fq.sz; writeq(mbox->version, OCTEP_CTRL_MBOX_INFO_HOST_VERSION(mbox->barmem)); /* ensure ready state is seen after everything is initialized */ wmb(); writeq(OCTEP_CTRL_MBOX_STATUS_READY, OCTEP_CTRL_MBOX_INFO_HOST_STATUS(mbox->barmem)); pr_info("Octep ctrl mbox : Init successful.\n"); return 0; } static void octep_write_mbox_data(struct octep_ctrl_mbox_q *q, u32 *pi, u32 ci, void *buf, u32 w_sz) { u8 __iomem *qbuf; u32 cp_sz; /* Assumption: Caller has ensured enough write space */ qbuf = (q->hw_q + *pi); if (*pi < ci) { /* copy entire w_sz */ memcpy_toio(qbuf, buf, w_sz); *pi = octep_ctrl_mbox_circq_inc(*pi, w_sz, q->sz); } else { /* copy up to end of queue */ cp_sz = min((q->sz - *pi), w_sz); memcpy_toio(qbuf, buf, cp_sz); w_sz -= cp_sz; *pi = octep_ctrl_mbox_circq_inc(*pi, cp_sz, q->sz); if (w_sz) { /* roll over and copy remaining w_sz */ buf += cp_sz; qbuf = (q->hw_q + *pi); memcpy_toio(qbuf, buf, w_sz); *pi = octep_ctrl_mbox_circq_inc(*pi, w_sz, q->sz); } } } int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg) { struct octep_ctrl_mbox_msg_buf *sg; struct octep_ctrl_mbox_q *q; u32 pi, ci, buf_sz, w_sz; int s; if (!mbox || !msg) return -EINVAL; if (readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS(mbox->barmem)) != OCTEP_CTRL_MBOX_STATUS_READY) return -EIO; mutex_lock(&mbox->h2fq_lock); q = &mbox->h2fq; pi = readl(q->hw_prod); ci = readl(q->hw_cons); if (octep_ctrl_mbox_circq_space(pi, ci, q->sz) < (msg->hdr.s.sz + mbox_hdr_sz)) { mutex_unlock(&mbox->h2fq_lock); return -EAGAIN; } octep_write_mbox_data(q, &pi, ci, (void *)&msg->hdr, mbox_hdr_sz); buf_sz = msg->hdr.s.sz; for (s = 0; ((s < msg->sg_num) && (buf_sz > 0)); s++) { sg = &msg->sg_list[s]; w_sz = (sg->sz <= buf_sz) ? sg->sz : buf_sz; octep_write_mbox_data(q, &pi, ci, sg->msg, w_sz); buf_sz -= w_sz; } writel(pi, q->hw_prod); mutex_unlock(&mbox->h2fq_lock); return 0; } static void octep_read_mbox_data(struct octep_ctrl_mbox_q *q, u32 pi, u32 *ci, void *buf, u32 r_sz) { u8 __iomem *qbuf; u32 cp_sz; /* Assumption: Caller has ensured enough read space */ qbuf = (q->hw_q + *ci); if (*ci < pi) { /* copy entire r_sz */ memcpy_fromio(buf, qbuf, r_sz); *ci = octep_ctrl_mbox_circq_inc(*ci, r_sz, q->sz); } else { /* copy up to end of queue */ cp_sz = min((q->sz - *ci), r_sz); memcpy_fromio(buf, qbuf, cp_sz); r_sz -= cp_sz; *ci = octep_ctrl_mbox_circq_inc(*ci, cp_sz, q->sz); if (r_sz) { /* roll over and copy remaining r_sz */ buf += cp_sz; qbuf = (q->hw_q + *ci); memcpy_fromio(buf, qbuf, r_sz); *ci = octep_ctrl_mbox_circq_inc(*ci, r_sz, q->sz); } } } int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg) { struct octep_ctrl_mbox_msg_buf *sg; u32 pi, ci, r_sz, buf_sz, q_depth; struct octep_ctrl_mbox_q *q; int s; if (readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS(mbox->barmem)) != OCTEP_CTRL_MBOX_STATUS_READY) return -EIO; mutex_lock(&mbox->f2hq_lock); q = &mbox->f2hq; pi = readl(q->hw_prod); ci = readl(q->hw_cons); q_depth = octep_ctrl_mbox_circq_depth(pi, ci, q->sz); if (q_depth < mbox_hdr_sz) { mutex_unlock(&mbox->f2hq_lock); return -EAGAIN; } octep_read_mbox_data(q, pi, &ci, (void *)&msg->hdr, mbox_hdr_sz); buf_sz = msg->hdr.s.sz; for (s = 0; ((s < msg->sg_num) && (buf_sz > 0)); s++) { sg = &msg->sg_list[s]; r_sz = (sg->sz <= buf_sz) ? 
sg->sz : buf_sz; octep_read_mbox_data(q, pi, &ci, sg->msg, r_sz); buf_sz -= r_sz; } writel(ci, q->hw_cons); mutex_unlock(&mbox->f2hq_lock); return 0; } int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox) { if (!mbox) return -EINVAL; if (!mbox->barmem) return -EINVAL; writeq(0, OCTEP_CTRL_MBOX_INFO_HOST_VERSION(mbox->barmem)); writeq(OCTEP_CTRL_MBOX_STATUS_INVALID, OCTEP_CTRL_MBOX_INFO_HOST_STATUS(mbox->barmem)); /* ensure uninit state is written before uninitialization */ wmb(); mutex_destroy(&mbox->h2fq_lock); mutex_destroy(&mbox->f2hq_lock); pr_info("Octep ctrl mbox : Uninit successful.\n"); return 0; }
linux-master
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
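The control mailbox above tracks its BAR-resident rings with plain producer/consumer byte offsets. Below is a minimal stand-alone sketch of the same index arithmetic, written as ordinary user-space C; it is not part of the kernel sources, and the names and sample sizes are illustrative only.

#include <stdio.h>
#include <stdlib.h>

/* mirror octep_ctrl_mbox_circq_inc/_depth/_space for a ring of sz bytes */
static unsigned int circq_inc(unsigned int index, unsigned int inc, unsigned int sz)
{
	return (index + inc) % sz;
}

static unsigned int circq_depth(unsigned int pi, unsigned int ci, unsigned int sz)
{
	return abs((int)pi - (int)ci) % sz;
}

static unsigned int circq_space(unsigned int pi, unsigned int ci, unsigned int sz)
{
	return sz - circq_depth(pi, ci, sz);
}

int main(void)
{
	unsigned int sz = 1024, pi = 1000, ci = 40;

	/* 960 bytes are queued, 64 bytes are still free */
	printf("depth=%u space=%u\n", circq_depth(pi, ci, sz), circq_space(pi, ci, sz));

	/* writing 100 more bytes wraps the producer index around to 76 */
	pi = circq_inc(pi, 100, sz);
	printf("pi=%u\n", pi);
	return 0;
}

This mirrors the space check octep_ctrl_mbox_send() performs before copying the header and scatter-gather buffers into the ring.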
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Ethernet driver * * Copyright (C) 2020 Marvell. * */ #include <linux/module.h> #include "otx2_common.h" #include "otx2_ptp.h" static bool is_tstmp_atomic_update_supported(struct otx2_ptp *ptp) { struct ptp_get_cap_rsp *rsp; struct msg_req *req; int err; if (!ptp->nic) return false; mutex_lock(&ptp->nic->mbox.lock); req = otx2_mbox_alloc_msg_ptp_get_cap(&ptp->nic->mbox); if (!req) { mutex_unlock(&ptp->nic->mbox.lock); return false; } err = otx2_sync_mbox_msg(&ptp->nic->mbox); if (err) { mutex_unlock(&ptp->nic->mbox.lock); return false; } rsp = (struct ptp_get_cap_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0, &req->hdr); mutex_unlock(&ptp->nic->mbox.lock); if (IS_ERR(rsp)) return false; if (rsp->cap & PTP_CAP_HW_ATOMIC_UPDATE) return true; return false; } static int otx2_ptp_hw_adjtime(struct ptp_clock_info *ptp_info, s64 delta) { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, ptp_info); struct otx2_nic *pfvf = ptp->nic; struct ptp_req *req; int rc; if (!ptp->nic) return -ENODEV; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); if (!req) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } req->op = PTP_OP_ADJTIME; req->delta = delta; rc = otx2_sync_mbox_msg(&ptp->nic->mbox); mutex_unlock(&pfvf->mbox.lock); return rc; } static u64 otx2_ptp_get_clock(struct otx2_ptp *ptp) { struct ptp_req *req; struct ptp_rsp *rsp; int err; if (!ptp->nic) return 0; req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); if (!req) return 0; req->op = PTP_OP_GET_CLOCK; err = otx2_sync_mbox_msg(&ptp->nic->mbox); if (err) return 0; rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0, &req->hdr); if (IS_ERR(rsp)) return 0; return rsp->clk; } static int otx2_ptp_hw_gettime(struct ptp_clock_info *ptp_info, struct timespec64 *ts) { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, ptp_info); u64 tstamp; tstamp = otx2_ptp_get_clock(ptp); *ts = ns_to_timespec64(tstamp); return 0; } static int otx2_ptp_hw_settime(struct ptp_clock_info *ptp_info, const struct timespec64 *ts) { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, ptp_info); struct otx2_nic *pfvf = ptp->nic; struct ptp_req *req; u64 nsec; int rc; if (!ptp->nic) return -ENODEV; nsec = timespec64_to_ns(ts); mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); if (!req) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } req->op = PTP_OP_SET_CLOCK; req->clk = nsec; rc = otx2_sync_mbox_msg(&ptp->nic->mbox); mutex_unlock(&pfvf->mbox.lock); return rc; } static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, ptp_info); struct ptp_req *req; if (!ptp->nic) return -ENODEV; req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); if (!req) return -ENOMEM; req->op = PTP_OP_ADJFINE; req->scaled_ppm = scaled_ppm; return otx2_sync_mbox_msg(&ptp->nic->mbox); } static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh) { struct ptp_req *req; if (!ptp->nic) return -ENODEV; req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); if (!req) return -ENOMEM; req->op = PTP_OP_SET_THRESH; req->thresh = thresh; return otx2_sync_mbox_msg(&ptp->nic->mbox); } static int ptp_extts_on(struct otx2_ptp *ptp, int on) { struct ptp_req *req; if (!ptp->nic) return -ENODEV; req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); if (!req) return -ENOMEM; req->op = PTP_OP_EXTTS_ON; req->extts_on = on; return otx2_sync_mbox_msg(&ptp->nic->mbox); } static u64 
ptp_cc_read(const struct cyclecounter *cc) { struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter); return otx2_ptp_get_clock(ptp); } static u64 ptp_tstmp_read(struct otx2_ptp *ptp) { struct ptp_req *req; struct ptp_rsp *rsp; int err; if (!ptp->nic) return 0; req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); if (!req) return 0; req->op = PTP_OP_GET_TSTMP; err = otx2_sync_mbox_msg(&ptp->nic->mbox); if (err) return 0; rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0, &req->hdr); if (IS_ERR(rsp)) return 0; return rsp->clk; } static int otx2_ptp_tc_adjtime(struct ptp_clock_info *ptp_info, s64 delta) { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, ptp_info); struct otx2_nic *pfvf = ptp->nic; mutex_lock(&pfvf->mbox.lock); timecounter_adjtime(&ptp->time_counter, delta); mutex_unlock(&pfvf->mbox.lock); return 0; } static int otx2_ptp_tc_gettime(struct ptp_clock_info *ptp_info, struct timespec64 *ts) { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, ptp_info); u64 tstamp; mutex_lock(&ptp->nic->mbox.lock); tstamp = timecounter_read(&ptp->time_counter); mutex_unlock(&ptp->nic->mbox.lock); *ts = ns_to_timespec64(tstamp); return 0; } static int otx2_ptp_tc_settime(struct ptp_clock_info *ptp_info, const struct timespec64 *ts) { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, ptp_info); u64 nsec; nsec = timespec64_to_ns(ts); mutex_lock(&ptp->nic->mbox.lock); timecounter_init(&ptp->time_counter, &ptp->cycle_counter, nsec); mutex_unlock(&ptp->nic->mbox.lock); return 0; } static int otx2_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, enum ptp_pin_function func, unsigned int chan) { switch (func) { case PTP_PF_NONE: case PTP_PF_EXTTS: break; case PTP_PF_PEROUT: case PTP_PF_PHYSYNC: return -1; } return 0; } static u64 otx2_ptp_hw_tstamp2time(const struct timecounter *time_counter, u64 tstamp) { /* On HW which supports atomic updates, timecounter is not initialized */ return tstamp; } static void otx2_ptp_extts_check(struct work_struct *work) { struct otx2_ptp *ptp = container_of(work, struct otx2_ptp, extts_work.work); struct ptp_clock_event event; u64 tstmp, new_thresh; mutex_lock(&ptp->nic->mbox.lock); tstmp = ptp_tstmp_read(ptp); mutex_unlock(&ptp->nic->mbox.lock); if (tstmp != ptp->last_extts) { event.type = PTP_CLOCK_EXTTS; event.index = 0; event.timestamp = ptp->ptp_tstamp2nsec(&ptp->time_counter, tstmp); ptp_clock_event(ptp->ptp_clock, &event); new_thresh = tstmp % 500000000; if (ptp->thresh != new_thresh) { mutex_lock(&ptp->nic->mbox.lock); ptp_set_thresh(ptp, new_thresh); mutex_unlock(&ptp->nic->mbox.lock); ptp->thresh = new_thresh; } ptp->last_extts = tstmp; } schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200)); } static void otx2_sync_tstamp(struct work_struct *work) { struct otx2_ptp *ptp = container_of(work, struct otx2_ptp, synctstamp_work.work); struct otx2_nic *pfvf = ptp->nic; u64 tstamp; mutex_lock(&pfvf->mbox.lock); tstamp = otx2_ptp_get_clock(ptp); mutex_unlock(&pfvf->mbox.lock); ptp->tstamp = ptp->ptp_tstamp2nsec(&ptp->time_counter, tstamp); ptp->base_ns = tstamp % NSEC_PER_SEC; schedule_delayed_work(&ptp->synctstamp_work, msecs_to_jiffies(250)); } static int otx2_ptp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq, int on) { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, ptp_info); int pin; if (!ptp->nic) return -ENODEV; switch (rq->type) { case PTP_CLK_REQ_EXTTS: pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, rq->extts.index); if (pin < 0) 
return -EBUSY; if (on) { ptp_extts_on(ptp, on); schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200)); } else { ptp_extts_on(ptp, on); cancel_delayed_work_sync(&ptp->extts_work); } return 0; default: break; } return -EOPNOTSUPP; } int otx2_ptp_init(struct otx2_nic *pfvf) { struct otx2_ptp *ptp_ptr; struct cyclecounter *cc; struct ptp_req *req; int err; if (is_otx2_lbkvf(pfvf->pdev)) { pfvf->ptp = NULL; return 0; } mutex_lock(&pfvf->mbox.lock); /* check if PTP block is available */ req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox); if (!req) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } req->op = PTP_OP_GET_CLOCK; err = otx2_sync_mbox_msg(&pfvf->mbox); if (err) { mutex_unlock(&pfvf->mbox.lock); return err; } mutex_unlock(&pfvf->mbox.lock); ptp_ptr = kzalloc(sizeof(*ptp_ptr), GFP_KERNEL); if (!ptp_ptr) { err = -ENOMEM; goto error; } ptp_ptr->nic = pfvf; snprintf(ptp_ptr->extts_config.name, sizeof(ptp_ptr->extts_config.name), "TSTAMP"); ptp_ptr->extts_config.index = 0; ptp_ptr->extts_config.func = PTP_PF_NONE; ptp_ptr->ptp_info = (struct ptp_clock_info) { .owner = THIS_MODULE, .name = "OcteonTX2 PTP", .max_adj = 1000000000ull, .n_ext_ts = 1, .n_pins = 1, .pps = 0, .pin_config = &ptp_ptr->extts_config, .adjfine = otx2_ptp_adjfine, .enable = otx2_ptp_enable, .verify = otx2_ptp_verify_pin, }; /* Check whether hardware supports atomic updates to timestamp */ if (is_tstmp_atomic_update_supported(ptp_ptr)) { ptp_ptr->ptp_info.adjtime = otx2_ptp_hw_adjtime; ptp_ptr->ptp_info.gettime64 = otx2_ptp_hw_gettime; ptp_ptr->ptp_info.settime64 = otx2_ptp_hw_settime; ptp_ptr->ptp_tstamp2nsec = otx2_ptp_hw_tstamp2time; } else { ptp_ptr->ptp_info.adjtime = otx2_ptp_tc_adjtime; ptp_ptr->ptp_info.gettime64 = otx2_ptp_tc_gettime; ptp_ptr->ptp_info.settime64 = otx2_ptp_tc_settime; cc = &ptp_ptr->cycle_counter; cc->read = ptp_cc_read; cc->mask = CYCLECOUNTER_MASK(64); cc->mult = 1; cc->shift = 0; ptp_ptr->ptp_tstamp2nsec = timecounter_cyc2time; timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter, ktime_to_ns(ktime_get_real())); } INIT_DELAYED_WORK(&ptp_ptr->extts_work, otx2_ptp_extts_check); ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev); if (IS_ERR_OR_NULL(ptp_ptr->ptp_clock)) { err = ptp_ptr->ptp_clock ? PTR_ERR(ptp_ptr->ptp_clock) : -ENODEV; kfree(ptp_ptr); goto error; } if (is_dev_otx2(pfvf->pdev)) { ptp_ptr->convert_rx_ptp_tstmp = &otx2_ptp_convert_rx_timestamp; ptp_ptr->convert_tx_ptp_tstmp = &otx2_ptp_convert_tx_timestamp; } else { ptp_ptr->convert_rx_ptp_tstmp = &cn10k_ptp_convert_timestamp; ptp_ptr->convert_tx_ptp_tstmp = &cn10k_ptp_convert_timestamp; } INIT_DELAYED_WORK(&ptp_ptr->synctstamp_work, otx2_sync_tstamp); pfvf->ptp = ptp_ptr; error: return err; } EXPORT_SYMBOL_GPL(otx2_ptp_init); void otx2_ptp_destroy(struct otx2_nic *pfvf) { struct otx2_ptp *ptp = pfvf->ptp; if (!ptp) return; cancel_delayed_work(&pfvf->ptp->synctstamp_work); ptp_clock_unregister(ptp->ptp_clock); kfree(ptp); pfvf->ptp = NULL; } EXPORT_SYMBOL_GPL(otx2_ptp_destroy); int otx2_ptp_clock_index(struct otx2_nic *pfvf) { if (!pfvf->ptp) return -ENODEV; return ptp_clock_index(pfvf->ptp->ptp_clock); } EXPORT_SYMBOL_GPL(otx2_ptp_clock_index); int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns) { if (!pfvf->ptp) return -ENODEV; *tsns = pfvf->ptp->ptp_tstamp2nsec(&pfvf->ptp->time_counter, tstamp); return 0; } EXPORT_SYMBOL_GPL(otx2_ptp_tstamp2time); MODULE_AUTHOR("Sunil Goutham <[email protected]>"); MODULE_DESCRIPTION("Marvell RVU NIC PTP Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
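When the silicon cannot update the PTP clock atomically, otx2_ptp.c above falls back to a software timecounter built on the raw PTP counter with mult = 1 and shift = 0. The stand-alone model below is a simplified sketch of that conversion, ignoring the masking and fractional-nanosecond bookkeeping the kernel's cyclecounter/timecounter code performs; it is not kernel code and the names are illustrative only.

#include <stdint.h>
#include <stdio.h>

struct sw_timecounter {
	uint64_t cycle_last;   /* counter value seen at the last read */
	uint64_t nsec;         /* accumulated nanoseconds */
};

/* anchor the free-running counter to a wall-clock start time */
static void sw_tc_init(struct sw_timecounter *tc, uint64_t counter_now, uint64_t start_ns)
{
	tc->cycle_last = counter_now;
	tc->nsec = start_ns;
}

/* with mult = 1, shift = 0 the conversion reduces to "base time + counter delta" */
static uint64_t sw_tc_read(struct sw_timecounter *tc, uint64_t counter_now)
{
	tc->nsec += counter_now - tc->cycle_last;
	tc->cycle_last = counter_now;
	return tc->nsec;
}

int main(void)
{
	struct sw_timecounter tc;

	sw_tc_init(&tc, 1000, 1700000000000000000ULL);
	/* counter advanced by 500, so the reported time advances by 500 ns */
	printf("%llu\n", (unsigned long long)sw_tc_read(&tc, 1500));
	return 0;
}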
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Ethernet driver * * Copyright (C) 2021 Marvell. * */ #include "otx2_common.h" static int otx2_check_pfc_config(struct otx2_nic *pfvf) { u8 tx_queues = pfvf->hw.tx_queues, prio; u8 pfc_en = pfvf->pfc_en; for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) { if ((pfc_en & (1 << prio)) && prio > tx_queues - 1) { dev_warn(pfvf->dev, "Increase number of tx queues from %d to %d to support PFC.\n", tx_queues, prio + 1); return -EINVAL; } } return 0; } int otx2_pfc_txschq_config(struct otx2_nic *pfvf) { u8 pfc_en, pfc_bit_set; int prio, lvl, err; pfc_en = pfvf->pfc_en; for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) { pfc_bit_set = pfc_en & (1 << prio); /* Either PFC bit is not set * or tx scheduler is not allocated for the priority */ if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio]) continue; /* Configure the scheduler for the TLs */ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { err = otx2_txschq_config(pfvf, lvl, prio, true); if (err) { dev_err(pfvf->dev, "%s configure PFC tx schq for lvl:%d, prio:%d failed!\n", __func__, lvl, prio); return err; } } } return 0; } static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio) { struct nix_txsch_alloc_req *req; struct nix_txsch_alloc_rsp *rsp; int lvl, rc; /* Get memory to put this msg */ req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox); if (!req) return -ENOMEM; /* Request one schq per level up to the configured * link config level. The remaining levels can reuse the * schedulers already in hw.txschq_list. */ for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++) req->schq[lvl] = 1; rc = otx2_sync_mbox_msg(&pfvf->mbox); if (rc) return rc; rsp = (struct nix_txsch_alloc_rsp *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); if (IS_ERR(rsp)) return PTR_ERR(rsp); /* Setup transmit scheduler list */ for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++) { if (!rsp->schq[lvl]) return -ENOSPC; pfvf->pfc_schq_list[lvl][prio] = rsp->schq_list[lvl][0]; } /* Set the Tx schedulers for the remaining levels to the * hw.txschq_list entries, as those are common to all priorities.
*/ for (; lvl < NIX_TXSCH_LVL_CNT; lvl++) pfvf->pfc_schq_list[lvl][prio] = pfvf->hw.txschq_list[lvl][0]; pfvf->pfc_alloc_status[prio] = true; return 0; } int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf) { u8 pfc_en = pfvf->pfc_en; u8 pfc_bit_set; int err, prio; for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) { pfc_bit_set = pfc_en & (1 << prio); if (!pfc_bit_set || pfvf->pfc_alloc_status[prio]) continue; /* Add new scheduler to the priority */ err = otx2_pfc_txschq_alloc_one(pfvf, prio); if (err) { dev_err(pfvf->dev, "%s failed to allocate PFC TX schedulers\n", __func__); return err; } } return 0; } static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio) { int lvl; /* free PFC TLx nodes */ for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++) otx2_txschq_free_one(pfvf, lvl, pfvf->pfc_schq_list[lvl][prio]); pfvf->pfc_alloc_status[prio] = false; return 0; } static int otx2_pfc_update_sq_smq_mapping(struct otx2_nic *pfvf, int prio) { struct nix_cn10k_aq_enq_req *cn10k_sq_aq; struct net_device *dev = pfvf->netdev; bool if_up = netif_running(dev); struct nix_aq_enq_req *sq_aq; if (if_up) { if (pfvf->pfc_alloc_status[prio]) netif_tx_stop_all_queues(pfvf->netdev); else netif_tx_stop_queue(netdev_get_tx_queue(dev, prio)); } if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) { cn10k_sq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox); if (!cn10k_sq_aq) return -ENOMEM; /* Fill AQ info */ cn10k_sq_aq->qidx = prio; cn10k_sq_aq->ctype = NIX_AQ_CTYPE_SQ; cn10k_sq_aq->op = NIX_AQ_INSTOP_WRITE; /* Fill fields to update */ cn10k_sq_aq->sq.ena = 1; cn10k_sq_aq->sq_mask.ena = 1; cn10k_sq_aq->sq_mask.smq = GENMASK(9, 0); cn10k_sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio); } else { sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); if (!sq_aq) return -ENOMEM; /* Fill AQ info */ sq_aq->qidx = prio; sq_aq->ctype = NIX_AQ_CTYPE_SQ; sq_aq->op = NIX_AQ_INSTOP_WRITE; /* Fill fields to update */ sq_aq->sq.ena = 1; sq_aq->sq_mask.ena = 1; sq_aq->sq_mask.smq = GENMASK(8, 0); sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio); } otx2_sync_mbox_msg(&pfvf->mbox); if (if_up) { if (pfvf->pfc_alloc_status[prio]) netif_tx_start_all_queues(pfvf->netdev); else netif_tx_start_queue(netdev_get_tx_queue(dev, prio)); } return 0; } int otx2_pfc_txschq_update(struct otx2_nic *pfvf) { bool if_up = netif_running(pfvf->netdev); u8 pfc_en = pfvf->pfc_en, pfc_bit_set; struct mbox *mbox = &pfvf->mbox; int err, prio; mutex_lock(&mbox->lock); for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) { pfc_bit_set = pfc_en & (1 << prio); /* tx scheduler was created but user wants to disable now */ if (!pfc_bit_set && pfvf->pfc_alloc_status[prio]) { mutex_unlock(&mbox->lock); if (if_up) netif_tx_stop_all_queues(pfvf->netdev); otx2_smq_flush(pfvf, pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][prio]); if (if_up) netif_tx_start_all_queues(pfvf->netdev); /* delete the schq */ err = otx2_pfc_txschq_stop_one(pfvf, prio); if (err) { dev_err(pfvf->dev, "%s failed to stop PFC tx schedulers for priority: %d\n", __func__, prio); return err; } mutex_lock(&mbox->lock); goto update_sq_smq_map; } /* Either PFC bit is not set * or Tx scheduler is already mapped for the priority */ if (!pfc_bit_set || pfvf->pfc_alloc_status[prio]) continue; /* Add new scheduler to the priority */ err = otx2_pfc_txschq_alloc_one(pfvf, prio); if (err) { mutex_unlock(&mbox->lock); dev_err(pfvf->dev, "%s failed to allocate PFC tx schedulers for priority: %d\n", __func__, prio); return err; } update_sq_smq_map: err = otx2_pfc_update_sq_smq_mapping(pfvf, prio); if (err) { 
mutex_unlock(&mbox->lock); dev_err(pfvf->dev, "%s failed PFC Tx schq sq:%d mapping", __func__, prio); return err; } } err = otx2_pfc_txschq_config(pfvf); mutex_unlock(&mbox->lock); if (err) return err; return 0; } int otx2_pfc_txschq_stop(struct otx2_nic *pfvf) { u8 pfc_en, pfc_bit_set; int prio, err; pfc_en = pfvf->pfc_en; for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) { pfc_bit_set = pfc_en & (1 << prio); if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio]) continue; /* Delete the existing scheduler */ err = otx2_pfc_txschq_stop_one(pfvf, prio); if (err) { dev_err(pfvf->dev, "%s failed to stop PFC TX schedulers\n", __func__); return err; } } return 0; } int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf) { struct cgx_pfc_cfg *req; struct cgx_pfc_rsp *rsp; int err = 0; if (is_otx2_lbkvf(pfvf->pdev)) return 0; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(&pfvf->mbox); if (!req) { err = -ENOMEM; goto unlock; } if (pfvf->pfc_en) { req->rx_pause = true; req->tx_pause = true; } else { req->rx_pause = false; req->tx_pause = false; } req->pfc_en = pfvf->pfc_en; if (!otx2_sync_mbox_msg(&pfvf->mbox)) { rsp = (struct cgx_pfc_rsp *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) { dev_warn(pfvf->dev, "Failed to config PFC\n"); err = -EPERM; } } unlock: mutex_unlock(&pfvf->mbox.lock); return err; } void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable) { bool if_up = netif_running(pfvf->netdev); struct npa_aq_enq_req *npa_aq; struct nix_aq_enq_req *aq; int err = 0; if (pfvf->queue_to_pfc_map[qidx] && pfc_enable) { dev_warn(pfvf->dev, "PFC enable not permitted as Priority %d already mapped to Queue %d\n", pfvf->queue_to_pfc_map[qidx], qidx); return; } if (if_up) { netif_tx_stop_all_queues(pfvf->netdev); netif_carrier_off(pfvf->netdev); } pfvf->queue_to_pfc_map[qidx] = vlan_prio; aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); if (!aq) { err = -ENOMEM; goto out; } aq->cq.bpid = pfvf->bpid[vlan_prio]; aq->cq_mask.bpid = GENMASK(8, 0); /* Fill AQ info */ aq->qidx = qidx; aq->ctype = NIX_AQ_CTYPE_CQ; aq->op = NIX_AQ_INSTOP_WRITE; otx2_sync_mbox_msg(&pfvf->mbox); npa_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); if (!npa_aq) { err = -ENOMEM; goto out; } npa_aq->aura.nix0_bpid = pfvf->bpid[vlan_prio]; npa_aq->aura_mask.nix0_bpid = GENMASK(8, 0); /* Fill NPA AQ info */ npa_aq->aura_id = qidx; npa_aq->ctype = NPA_AQ_CTYPE_AURA; npa_aq->op = NPA_AQ_INSTOP_WRITE; otx2_sync_mbox_msg(&pfvf->mbox); out: if (if_up) { netif_carrier_on(pfvf->netdev); netif_tx_start_all_queues(pfvf->netdev); } if (err) dev_warn(pfvf->dev, "Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n", qidx, err); } static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc) { struct otx2_nic *pfvf = netdev_priv(dev); pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS; pfc->pfc_en = pfvf->pfc_en; return 0; } static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) { struct otx2_nic *pfvf = netdev_priv(dev); int err; /* Save PFC configuration to interface */ pfvf->pfc_en = pfc->pfc_en; if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX) goto process_pfc; /* Check if the PFC configuration can be * supported by the tx queue configuration */ err = otx2_check_pfc_config(pfvf); if (err) return err; process_pfc: err = otx2_config_priority_flow_ctrl(pfvf); if (err) return err; /* Request Per channel Bpids */ if (pfc->pfc_en) otx2_nix_config_bp(pfvf, 
true); err = otx2_pfc_txschq_update(pfvf); if (err) { dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__); return err; } return 0; } static u8 otx2_dcbnl_getdcbx(struct net_device __always_unused *dev) { return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; } static u8 otx2_dcbnl_setdcbx(struct net_device __always_unused *dev, u8 mode) { return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0; } static const struct dcbnl_rtnl_ops otx2_dcbnl_ops = { .ieee_getpfc = otx2_dcbnl_ieee_getpfc, .ieee_setpfc = otx2_dcbnl_ieee_setpfc, .getdcbx = otx2_dcbnl_getdcbx, .setdcbx = otx2_dcbnl_setdcbx, }; int otx2_dcbnl_set_ops(struct net_device *dev) { struct otx2_nic *pfvf = netdev_priv(dev); pfvf->queue_to_pfc_map = devm_kzalloc(pfvf->dev, pfvf->hw.rx_queues, GFP_KERNEL); if (!pfvf->queue_to_pfc_map) return -ENOMEM; dev->dcbnl_ops = &otx2_dcbnl_ops; return 0; }
linux-master
drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Physical Function ethernet driver * * Copyright (C) 2023 Marvell. * */ #include <linux/netdevice.h> #include <net/tso.h> #include "cn10k.h" #include "otx2_reg.h" #include "otx2_common.h" #include "otx2_txrx.h" #include "otx2_struct.h" #define OTX2_QOS_MAX_LEAF_NODES 16 static void otx2_qos_aura_pool_free(struct otx2_nic *pfvf, int pool_id) { struct otx2_pool *pool; if (!pfvf->qset.pool) return; pool = &pfvf->qset.pool[pool_id]; qmem_free(pfvf->dev, pool->stack); qmem_free(pfvf->dev, pool->fc_addr); pool->stack = NULL; pool->fc_addr = NULL; } static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx) { struct otx2_qset *qset = &pfvf->qset; int pool_id, stack_pages, num_sqbs; struct otx2_hw *hw = &pfvf->hw; struct otx2_snd_queue *sq; struct otx2_pool *pool; dma_addr_t bufptr; int err, ptr; u64 iova, pa; /* Calculate number of SQBs needed. * * For a 128byte SQE, and 4K size SQB, 31 SQEs will fit in one SQB. * Last SQE is used for pointing to next SQB. */ num_sqbs = (hw->sqb_size / 128) - 1; num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs; /* Get no of stack pages needed */ stack_pages = (num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs; pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); pool = &pfvf->qset.pool[pool_id]; /* Initialize aura context */ err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs); if (err) return err; /* Initialize pool context */ err = otx2_pool_init(pfvf, pool_id, stack_pages, num_sqbs, hw->sqb_size, AURA_NIX_SQ); if (err) goto aura_free; /* Flush accumulated messages */ err = otx2_sync_mbox_msg(&pfvf->mbox); if (err) goto pool_free; /* Allocate pointers and free them to aura/pool */ sq = &qset->sq[qidx]; sq->sqb_count = 0; sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL); if (!sq->sqb_ptrs) { err = -ENOMEM; goto pool_free; } for (ptr = 0; ptr < num_sqbs; ptr++) { err = otx2_alloc_rbuf(pfvf, pool, &bufptr); if (err) goto sqb_free; pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr); sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr; } return 0; sqb_free: while (ptr--) { if (!sq->sqb_ptrs[ptr]) continue; iova = sq->sqb_ptrs[ptr]; pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); put_page(virt_to_page(phys_to_virt(pa))); otx2_aura_allocptr(pfvf, pool_id); } sq->sqb_count = 0; kfree(sq->sqb_ptrs); pool_free: qmem_free(pfvf->dev, pool->stack); aura_free: qmem_free(pfvf->dev, pool->fc_addr); otx2_mbox_reset(&pfvf->mbox.mbox, 0); return err; } static void otx2_qos_sq_free_sqbs(struct otx2_nic *pfvf, int qidx) { struct otx2_qset *qset = &pfvf->qset; struct otx2_hw *hw = &pfvf->hw; struct otx2_snd_queue *sq; u64 iova, pa; int sqb; sq = &qset->sq[qidx]; if (!sq->sqb_ptrs) return; for (sqb = 0; sqb < sq->sqb_count; sqb++) { if (!sq->sqb_ptrs[sqb]) continue; iova = sq->sqb_ptrs[sqb]; pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); put_page(virt_to_page(phys_to_virt(pa))); } sq->sqb_count = 0; sq = &qset->sq[qidx]; qmem_free(pfvf->dev, sq->sqe); qmem_free(pfvf->dev, sq->tso_hdrs); kfree(sq->sg); kfree(sq->sqb_ptrs); qmem_free(pfvf->dev, sq->timestamps); memset((void *)sq, 0, sizeof(*sq)); } /* send queue id */ static void otx2_qos_sqb_flush(struct otx2_nic *pfvf, int qidx) { int sqe_tail, sqe_head; u64 incr, *ptr, val; ptr = (__force u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); incr = (u64)qidx << 32; val 
= otx2_atomic64_add(incr, ptr); sqe_head = (val >> 20) & 0x3F; sqe_tail = (val >> 28) & 0x3F; if (sqe_head != sqe_tail) usleep_range(50, 60); } static int otx2_qos_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id) { struct nix_cn10k_aq_enq_req *cn10k_sq_aq; struct npa_aq_enq_req *aura_aq; struct npa_aq_enq_req *pool_aq; struct nix_aq_enq_req *sq_aq; if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) { cn10k_sq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox); if (!cn10k_sq_aq) return -ENOMEM; cn10k_sq_aq->qidx = qidx; cn10k_sq_aq->sq.ena = 0; cn10k_sq_aq->sq_mask.ena = 1; cn10k_sq_aq->ctype = NIX_AQ_CTYPE_SQ; cn10k_sq_aq->op = NIX_AQ_INSTOP_WRITE; } else { sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); if (!sq_aq) return -ENOMEM; sq_aq->qidx = qidx; sq_aq->sq.ena = 0; sq_aq->sq_mask.ena = 1; sq_aq->ctype = NIX_AQ_CTYPE_SQ; sq_aq->op = NIX_AQ_INSTOP_WRITE; } aura_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); if (!aura_aq) { otx2_mbox_reset(&pfvf->mbox.mbox, 0); return -ENOMEM; } aura_aq->aura_id = aura_id; aura_aq->aura.ena = 0; aura_aq->aura_mask.ena = 1; aura_aq->ctype = NPA_AQ_CTYPE_AURA; aura_aq->op = NPA_AQ_INSTOP_WRITE; pool_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); if (!pool_aq) { otx2_mbox_reset(&pfvf->mbox.mbox, 0); return -ENOMEM; } pool_aq->aura_id = aura_id; pool_aq->pool.ena = 0; pool_aq->pool_mask.ena = 1; pool_aq->ctype = NPA_AQ_CTYPE_POOL; pool_aq->op = NPA_AQ_INSTOP_WRITE; return otx2_sync_mbox_msg(&pfvf->mbox); } int otx2_qos_get_qid(struct otx2_nic *pfvf) { int qidx; qidx = find_first_zero_bit(pfvf->qos.qos_sq_bmap, pfvf->hw.tc_tx_queues); return qidx == pfvf->hw.tc_tx_queues ? -ENOSPC : qidx; } void otx2_qos_free_qid(struct otx2_nic *pfvf, int qidx) { clear_bit(qidx, pfvf->qos.qos_sq_bmap); } int otx2_qos_enable_sq(struct otx2_nic *pfvf, int qidx) { struct otx2_hw *hw = &pfvf->hw; int pool_id, sq_idx, err; if (pfvf->flags & OTX2_FLAG_INTF_DOWN) return -EPERM; sq_idx = hw->non_qos_queues + qidx; mutex_lock(&pfvf->mbox.lock); err = otx2_qos_sq_aura_pool_init(pfvf, sq_idx); if (err) goto out; pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, sq_idx); err = otx2_sq_init(pfvf, sq_idx, pool_id); if (err) goto out; out: mutex_unlock(&pfvf->mbox.lock); return err; } void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx) { struct otx2_qset *qset = &pfvf->qset; struct otx2_hw *hw = &pfvf->hw; struct otx2_snd_queue *sq; struct otx2_cq_queue *cq; int pool_id, sq_idx; sq_idx = hw->non_qos_queues + qidx; /* If the DOWN flag is set SQs are already freed */ if (pfvf->flags & OTX2_FLAG_INTF_DOWN) return; sq = &pfvf->qset.sq[sq_idx]; if (!sq->sqb_ptrs) return; if (sq_idx < hw->non_qos_queues || sq_idx >= otx2_get_total_tx_queues(pfvf)) { netdev_err(pfvf->netdev, "Send Queue is not a QoS queue\n"); return; } cq = &qset->cq[pfvf->hw.rx_queues + sq_idx]; pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, sq_idx); otx2_qos_sqb_flush(pfvf, sq_idx); otx2_smq_flush(pfvf, otx2_get_smq_idx(pfvf, sq_idx)); otx2_cleanup_tx_cqes(pfvf, cq); mutex_lock(&pfvf->mbox.lock); otx2_qos_ctx_disable(pfvf, sq_idx, pool_id); mutex_unlock(&pfvf->mbox.lock); otx2_qos_sq_free_sqbs(pfvf, sq_idx); otx2_qos_aura_pool_free(pfvf, pool_id); }
linux-master
drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
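As a worked example of the SQB sizing in otx2_qos_sq_aura_pool_init() above: assuming, for illustration, a 4 KB SQB and 128-byte SQEs, each SQB holds (4096 / 128) - 1 = 31 SQEs (the last slot links to the next SQB), so a send queue of 1024 SQEs needs (1024 + 31) / 31 = 34 SQBs, and the stack page count then follows from dividing that by hw->stack_pg_ptrs.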
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Ethernet driver * * Copyright (C) 2020 Marvell. * */ #include <linux/interrupt.h> #include <linux/pci.h> #include <net/page_pool/helpers.h> #include <net/tso.h> #include <linux/bitfield.h> #include "otx2_reg.h" #include "otx2_common.h" #include "otx2_struct.h" #include "cn10k.h" static void otx2_nix_rq_op_stats(struct queue_stats *stats, struct otx2_nic *pfvf, int qidx) { u64 incr = (u64)qidx << 32; u64 *ptr; ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS); stats->bytes = otx2_atomic64_add(incr, ptr); ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS); stats->pkts = otx2_atomic64_add(incr, ptr); } static void otx2_nix_sq_op_stats(struct queue_stats *stats, struct otx2_nic *pfvf, int qidx) { u64 incr = (u64)qidx << 32; u64 *ptr; ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS); stats->bytes = otx2_atomic64_add(incr, ptr); ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS); stats->pkts = otx2_atomic64_add(incr, ptr); } void otx2_update_lmac_stats(struct otx2_nic *pfvf) { struct msg_req *req; if (!netif_running(pfvf->netdev)) return; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox); if (!req) { mutex_unlock(&pfvf->mbox.lock); return; } otx2_sync_mbox_msg(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock); } void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf) { struct msg_req *req; if (!netif_running(pfvf->netdev)) return; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_cgx_fec_stats(&pfvf->mbox); if (req) otx2_sync_mbox_msg(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock); } int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx) { struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx]; if (!pfvf->qset.rq) return 0; otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx); return 1; } int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx) { struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx]; if (!pfvf->qset.sq) return 0; if (qidx >= pfvf->hw.non_qos_queues) { if (!test_bit(qidx - pfvf->hw.non_qos_queues, pfvf->qos.qos_sq_bmap)) return 0; } otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx); return 1; } void otx2_get_dev_stats(struct otx2_nic *pfvf) { struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats; dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS); dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP); dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST); dev_stats->rx_mcast_frames = OTX2_GET_RX_STATS(RX_MCAST); dev_stats->rx_ucast_frames = OTX2_GET_RX_STATS(RX_UCAST); dev_stats->rx_frames = dev_stats->rx_bcast_frames + dev_stats->rx_mcast_frames + dev_stats->rx_ucast_frames; dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS); dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP); dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST); dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST); dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST); dev_stats->tx_frames = dev_stats->tx_bcast_frames + dev_stats->tx_mcast_frames + dev_stats->tx_ucast_frames; } void otx2_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct otx2_nic *pfvf = netdev_priv(netdev); struct otx2_dev_stats *dev_stats; otx2_get_dev_stats(pfvf); dev_stats = &pfvf->hw.dev_stats; stats->rx_bytes = dev_stats->rx_bytes; stats->rx_packets = dev_stats->rx_frames; stats->rx_dropped = dev_stats->rx_drops; stats->multicast = dev_stats->rx_mcast_frames; stats->tx_bytes = dev_stats->tx_bytes; stats->tx_packets = dev_stats->tx_frames; stats->tx_dropped = dev_stats->tx_drops; } EXPORT_SYMBOL(otx2_get_stats64); /* Sync MAC 
address with RVU AF */ static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac) { struct nix_set_mac_addr *req; int err; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox); if (!req) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } ether_addr_copy(req->mac_addr, mac); err = otx2_sync_mbox_msg(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock); return err; } static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf, struct net_device *netdev) { struct nix_get_mac_addr_rsp *rsp; struct mbox_msghdr *msghdr; struct msg_req *req; int err; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox); if (!req) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } err = otx2_sync_mbox_msg(&pfvf->mbox); if (err) { mutex_unlock(&pfvf->mbox.lock); return err; } msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); if (IS_ERR(msghdr)) { mutex_unlock(&pfvf->mbox.lock); return PTR_ERR(msghdr); } rsp = (struct nix_get_mac_addr_rsp *)msghdr; eth_hw_addr_set(netdev, rsp->mac_addr); mutex_unlock(&pfvf->mbox.lock); return 0; } int otx2_set_mac_address(struct net_device *netdev, void *p) { struct otx2_nic *pfvf = netdev_priv(netdev); struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) { eth_hw_addr_set(netdev, addr->sa_data); /* update dmac field in vlan offload rule */ if (netif_running(netdev) && pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) otx2_install_rxvlan_offload_flow(pfvf); /* update dmac address in ntuple and DMAC filter list */ if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) otx2_dmacflt_update_pfmac_flow(pfvf); } else { return -EPERM; } return 0; } EXPORT_SYMBOL(otx2_set_mac_address); int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu) { struct nix_frs_cfg *req; u16 maxlen; int err; maxlen = otx2_get_max_mtu(pfvf) + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox); if (!req) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; /* Use max receive length supported by hardware for loopback devices */ if (is_otx2_lbkvf(pfvf->pdev)) req->maxlen = maxlen; err = otx2_sync_mbox_msg(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock); return err; } int otx2_config_pause_frm(struct otx2_nic *pfvf) { struct cgx_pause_frm_cfg *req; int err; if (is_otx2_lbkvf(pfvf->pdev)) return 0; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox); if (!req) { err = -ENOMEM; goto unlock; } req->rx_pause = !!(pfvf->flags & OTX2_FLAG_RX_PAUSE_ENABLED); req->tx_pause = !!(pfvf->flags & OTX2_FLAG_TX_PAUSE_ENABLED); req->set = 1; err = otx2_sync_mbox_msg(&pfvf->mbox); unlock: mutex_unlock(&pfvf->mbox.lock); return err; } EXPORT_SYMBOL(otx2_config_pause_frm); int otx2_set_flowkey_cfg(struct otx2_nic *pfvf) { struct otx2_rss_info *rss = &pfvf->hw.rss_info; struct nix_rss_flowkey_cfg_rsp *rsp; struct nix_rss_flowkey_cfg *req; int err; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox); if (!req) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } req->mcam_index = -1; /* Default or reserved index */ req->flowkey_cfg = rss->flowkey_cfg; req->group = DEFAULT_RSS_CONTEXT_GROUP; err = otx2_sync_mbox_msg(&pfvf->mbox); if (err) goto fail; rsp = (struct nix_rss_flowkey_cfg_rsp *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); if (IS_ERR(rsp)) { err = PTR_ERR(rsp); goto fail; } 
pfvf->hw.flowkey_alg_idx = rsp->alg_idx; fail: mutex_unlock(&pfvf->mbox.lock); return err; } int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id) { struct otx2_rss_info *rss = &pfvf->hw.rss_info; const int index = rss->rss_size * ctx_id; struct mbox *mbox = &pfvf->mbox; struct otx2_rss_ctx *rss_ctx; struct nix_aq_enq_req *aq; int idx, err; mutex_lock(&mbox->lock); rss_ctx = rss->rss_ctx[ctx_id]; /* Get memory to put this msg */ for (idx = 0; idx < rss->rss_size; idx++) { aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); if (!aq) { /* The shared memory buffer can be full. * Flush it and retry */ err = otx2_sync_mbox_msg(mbox); if (err) { mutex_unlock(&mbox->lock); return err; } aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); if (!aq) { mutex_unlock(&mbox->lock); return -ENOMEM; } } aq->rss.rq = rss_ctx->ind_tbl[idx]; /* Fill AQ info */ aq->qidx = index + idx; aq->ctype = NIX_AQ_CTYPE_RSS; aq->op = NIX_AQ_INSTOP_INIT; } err = otx2_sync_mbox_msg(mbox); mutex_unlock(&mbox->lock); return err; } void otx2_set_rss_key(struct otx2_nic *pfvf) { struct otx2_rss_info *rss = &pfvf->hw.rss_info; u64 *key = (u64 *)&rss->key[4]; int idx; /* 352bit or 44byte key needs to be configured as below * NIX_LF_RX_SECRETX0 = key<351:288> * NIX_LF_RX_SECRETX1 = key<287:224> * NIX_LF_RX_SECRETX2 = key<223:160> * NIX_LF_RX_SECRETX3 = key<159:96> * NIX_LF_RX_SECRETX4 = key<95:32> * NIX_LF_RX_SECRETX5<63:32> = key<31:0> */ otx2_write64(pfvf, NIX_LF_RX_SECRETX(5), (u64)(*((u32 *)&rss->key)) << 32); idx = sizeof(rss->key) / sizeof(u64); while (idx > 0) { idx--; otx2_write64(pfvf, NIX_LF_RX_SECRETX(idx), *key++); } } int otx2_rss_init(struct otx2_nic *pfvf) { struct otx2_rss_info *rss = &pfvf->hw.rss_info; struct otx2_rss_ctx *rss_ctx; int idx, ret = 0; rss->rss_size = sizeof(*rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]); /* Init RSS key if it is not setup already */ if (!rss->enable) netdev_rss_key_fill(rss->key, sizeof(rss->key)); otx2_set_rss_key(pfvf); if (!netif_is_rxfh_configured(pfvf->netdev)) { /* Set RSS group 0 as default indirection table */ rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP] = kzalloc(rss->rss_size, GFP_KERNEL); if (!rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]) return -ENOMEM; rss_ctx = rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]; for (idx = 0; idx < rss->rss_size; idx++) rss_ctx->ind_tbl[idx] = ethtool_rxfh_indir_default(idx, pfvf->hw.rx_queues); } ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP); if (ret) return ret; /* Flowkey or hash config to be used for generating flow tag */ rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg : NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 | NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN | NIX_FLOW_KEY_TYPE_IPV4_PROTO; ret = otx2_set_flowkey_cfg(pfvf); if (ret) return ret; rss->enable = true; return 0; } /* Setup UDP segmentation algorithm in HW */ static void otx2_setup_udp_segmentation(struct nix_lso_format_cfg *lso, bool v4) { struct nix_lso_format *field; field = (struct nix_lso_format *)&lso->fields[0]; lso->field_mask = GENMASK(18, 0); /* IP's Length field */ field->layer = NIX_TXLAYER_OL3; /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */ field->offset = v4 ? 
2 : 4; field->sizem1 = 1; /* i.e 2 bytes */ field->alg = NIX_LSOALG_ADD_PAYLEN; field++; /* No ID field in IPv6 header */ if (v4) { /* Increment IPID */ field->layer = NIX_TXLAYER_OL3; field->offset = 4; field->sizem1 = 1; /* i.e 2 bytes */ field->alg = NIX_LSOALG_ADD_SEGNUM; field++; } /* Update length in UDP header */ field->layer = NIX_TXLAYER_OL4; field->offset = 4; field->sizem1 = 1; field->alg = NIX_LSOALG_ADD_PAYLEN; } /* Setup segmentation algorithms in HW and retrieve algorithm index */ void otx2_setup_segmentation(struct otx2_nic *pfvf) { struct nix_lso_format_cfg_rsp *rsp; struct nix_lso_format_cfg *lso; struct otx2_hw *hw = &pfvf->hw; int err; mutex_lock(&pfvf->mbox.lock); /* UDPv4 segmentation */ lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox); if (!lso) goto fail; /* Setup UDP/IP header fields that HW should update per segment */ otx2_setup_udp_segmentation(lso, true); err = otx2_sync_mbox_msg(&pfvf->mbox); if (err) goto fail; rsp = (struct nix_lso_format_cfg_rsp *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr); if (IS_ERR(rsp)) goto fail; hw->lso_udpv4_idx = rsp->lso_format_idx; /* UDPv6 segmentation */ lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox); if (!lso) goto fail; /* Setup UDP/IP header fields that HW should update per segment */ otx2_setup_udp_segmentation(lso, false); err = otx2_sync_mbox_msg(&pfvf->mbox); if (err) goto fail; rsp = (struct nix_lso_format_cfg_rsp *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr); if (IS_ERR(rsp)) goto fail; hw->lso_udpv6_idx = rsp->lso_format_idx; mutex_unlock(&pfvf->mbox.lock); return; fail: mutex_unlock(&pfvf->mbox.lock); netdev_info(pfvf->netdev, "Failed to get LSO index for UDP GSO offload, disabling\n"); pfvf->netdev->hw_features &= ~NETIF_F_GSO_UDP_L4; } void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx) { /* Configure CQE interrupt coalescing parameters * * HW triggers an irq when ECOUNT > cq_ecount_wait, hence * set 1 less than cq_ecount_wait. And cq_time_wait is in * usecs, convert that to 100ns count. 
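* For example, a cq_time_wait of 100 usecs is written as 100 * 10 = 1000 ticks of 100 ns in the upper 16 bits, and a cq_ecount_wait of 32 is written as 31.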
*/ otx2_write64(pfvf, NIX_LF_CINTX_WAIT(qidx), ((u64)(pfvf->hw.cq_time_wait * 10) << 48) | ((u64)pfvf->hw.cq_qcount_wait << 32) | (pfvf->hw.cq_ecount_wait - 1)); } static int otx2_alloc_pool_buf(struct otx2_nic *pfvf, struct otx2_pool *pool, dma_addr_t *dma) { unsigned int offset = 0; struct page *page; size_t sz; sz = SKB_DATA_ALIGN(pool->rbsize); sz = ALIGN(sz, OTX2_ALIGN); page = page_pool_alloc_frag(pool->page_pool, &offset, sz, GFP_ATOMIC); if (unlikely(!page)) return -ENOMEM; *dma = page_pool_get_dma_addr(page) + offset; return 0; } static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, dma_addr_t *dma) { u8 *buf; if (pool->page_pool) return otx2_alloc_pool_buf(pfvf, pool, dma); buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN); if (unlikely(!buf)) return -ENOMEM; *dma = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); if (unlikely(dma_mapping_error(pfvf->dev, *dma))) { page_frag_free(buf); return -ENOMEM; } return 0; } int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, dma_addr_t *dma) { int ret; local_bh_disable(); ret = __otx2_alloc_rbuf(pfvf, pool, dma); local_bh_enable(); return ret; } int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, dma_addr_t *dma) { if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) return -ENOMEM; return 0; } void otx2_tx_timeout(struct net_device *netdev, unsigned int txq) { struct otx2_nic *pfvf = netdev_priv(netdev); schedule_work(&pfvf->reset_task); } EXPORT_SYMBOL(otx2_tx_timeout); void otx2_get_mac_from_af(struct net_device *netdev) { struct otx2_nic *pfvf = netdev_priv(netdev); int err; err = otx2_hw_get_mac_addr(pfvf, netdev); if (err) dev_warn(pfvf->dev, "Failed to read mac from hardware\n"); /* If AF doesn't provide a valid MAC, generate a random one */ if (!is_valid_ether_addr(netdev->dev_addr)) eth_hw_addr_random(netdev); } EXPORT_SYMBOL(otx2_get_mac_from_af); int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for_pfc) { u16 (*schq_list)[MAX_TXSCHQ_PER_FUNC]; struct otx2_hw *hw = &pfvf->hw; struct nix_txschq_config *req; u64 schq, parent; u64 dwrr_val; dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox); if (!req) return -ENOMEM; req->lvl = lvl; req->num_regs = 1; schq_list = hw->txschq_list; #ifdef CONFIG_DCB if (txschq_for_pfc) schq_list = pfvf->pfc_schq_list; #endif schq = schq_list[lvl][prio]; /* Set topology e.t.c configuration */ if (lvl == NIX_TXSCH_LVL_SMQ) { req->reg[0] = NIX_AF_SMQX_CFG(schq); req->regval[0] = ((u64)pfvf->tx_max_pktlen << 8) | OTX2_MIN_MTU; req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) | (0x2ULL << 36); /* Set link type for DWRR MTU selection on CN10K silicons */ if (!is_dev_otx2(pfvf->pdev)) req->regval[0] |= FIELD_PREP(GENMASK_ULL(58, 57), (u64)hw->smq_link_type); req->num_regs++; /* MDQ config */ parent = schq_list[NIX_TXSCH_LVL_TL4][prio]; req->reg[1] = NIX_AF_MDQX_PARENT(schq); req->regval[1] = parent << 16; req->num_regs++; /* Set DWRR quantum */ req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq); req->regval[2] = dwrr_val; } else if (lvl == NIX_TXSCH_LVL_TL4) { parent = schq_list[NIX_TXSCH_LVL_TL3][prio]; req->reg[0] = NIX_AF_TL4X_PARENT(schq); req->regval[0] = parent << 16; req->num_regs++; req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq); req->regval[1] = dwrr_val; } else if (lvl == NIX_TXSCH_LVL_TL3) { parent = schq_list[NIX_TXSCH_LVL_TL2][prio]; req->reg[0] = NIX_AF_TL3X_PARENT(schq); req->regval[0] = parent << 16; 
req->num_regs++; req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq); req->regval[1] = dwrr_val; if (lvl == hw->txschq_link_cfg_lvl) { req->num_regs++; req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link); /* Enable this queue and backpressure * and set relative channel */ req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio; } } else if (lvl == NIX_TXSCH_LVL_TL2) { parent = schq_list[NIX_TXSCH_LVL_TL1][prio]; req->reg[0] = NIX_AF_TL2X_PARENT(schq); req->regval[0] = parent << 16; req->num_regs++; req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq); req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val; if (lvl == hw->txschq_link_cfg_lvl) { req->num_regs++; req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link); /* Enable this queue and backpressure * and set relative channel */ req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio; } } else if (lvl == NIX_TXSCH_LVL_TL1) { /* Default config for TL1. * For VF this is always ignored. */ /* On CN10K, if RR_WEIGHT is greater than 16384, HW will * clip it to 16384, so configuring a 24bit max value * will work on both OTx2 and CN10K. */ req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq); req->regval[0] = TXSCH_TL1_DFLT_RR_QTM; req->num_regs++; req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq); req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1); req->num_regs++; req->reg[2] = NIX_AF_TL1X_CIR(schq); req->regval[2] = 0; } return otx2_sync_mbox_msg(&pfvf->mbox); } EXPORT_SYMBOL(otx2_txschq_config); int otx2_smq_flush(struct otx2_nic *pfvf, int smq) { struct nix_txschq_config *req; int rc; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox); if (!req) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } req->lvl = NIX_TXSCH_LVL_SMQ; req->reg[0] = NIX_AF_SMQX_CFG(smq); req->regval[0] |= BIT_ULL(49); req->num_regs++; rc = otx2_sync_mbox_msg(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock); return rc; } EXPORT_SYMBOL(otx2_smq_flush); int otx2_txsch_alloc(struct otx2_nic *pfvf) { struct nix_txsch_alloc_req *req; struct nix_txsch_alloc_rsp *rsp; int lvl, schq, rc; /* Get memory to put this msg */ req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox); if (!req) return -ENOMEM; /* Request one schq per level */ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) req->schq[lvl] = 1; rc = otx2_sync_mbox_msg(&pfvf->mbox); if (rc) return rc; rsp = (struct nix_txsch_alloc_rsp *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); if (IS_ERR(rsp)) return PTR_ERR(rsp); /* Setup transmit scheduler list */ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) for (schq = 0; schq < rsp->schq[lvl]; schq++) pfvf->hw.txschq_list[lvl][schq] = rsp->schq_list[lvl][schq]; pfvf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl; pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio; return 0; } void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq) { struct nix_txsch_free_req *free_req; int err; mutex_lock(&pfvf->mbox.lock); free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox); if (!free_req) { mutex_unlock(&pfvf->mbox.lock); netdev_err(pfvf->netdev, "Failed alloc txschq free req\n"); return; } free_req->schq_lvl = lvl; free_req->schq = schq; err = otx2_sync_mbox_msg(&pfvf->mbox); if (err) { netdev_err(pfvf->netdev, "Failed stop txschq %d at level %d\n", schq, lvl); } mutex_unlock(&pfvf->mbox.lock); } EXPORT_SYMBOL(otx2_txschq_free_one); void otx2_txschq_stop(struct otx2_nic *pfvf) { int lvl, schq; /* free non QOS TLx nodes */ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) otx2_txschq_free_one(pfvf, lvl, pfvf->hw.txschq_list[lvl][0]); /* Clear the txschq list */ for (lvl = 0; lvl < 
NIX_TXSCH_LVL_CNT; lvl++) { for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++) pfvf->hw.txschq_list[lvl][schq] = 0; } } void otx2_sqb_flush(struct otx2_nic *pfvf) { int qidx, sqe_tail, sqe_head; struct otx2_snd_queue *sq; u64 incr, *ptr, val; int timeout = 1000; ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) { sq = &pfvf->qset.sq[qidx]; if (!sq->sqb_ptrs) continue; incr = (u64)qidx << 32; while (timeout) { val = otx2_atomic64_add(incr, ptr); sqe_head = (val >> 20) & 0x3F; sqe_tail = (val >> 28) & 0x3F; if (sqe_head == sqe_tail) break; usleep_range(1, 3); timeout--; } } } /* RED and drop levels of CQ on packet reception. * For CQ level is measure of emptiness ( 0x0 = full, 255 = empty). */ #define RQ_PASS_LVL_CQ(skid, qsize) ((((skid) + 16) * 256) / (qsize)) #define RQ_DROP_LVL_CQ(skid, qsize) (((skid) * 256) / (qsize)) /* RED and drop levels of AURA for packet reception. * For AURA level is measure of fullness (0x0 = empty, 255 = full). * Eg: For RQ length 1K, for pass/drop level 204/230. * RED accepts pkts if free pointers > 102 & <= 205. * Drops pkts if free pointers < 102. */ #define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */ #define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */ #define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */ static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura) { struct otx2_qset *qset = &pfvf->qset; struct nix_aq_enq_req *aq; /* Get memory to put this msg */ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); if (!aq) return -ENOMEM; aq->rq.cq = qidx; aq->rq.ena = 1; aq->rq.pb_caching = 1; aq->rq.lpb_aura = lpb_aura; /* Use large packet buffer aura */ aq->rq.lpb_sizem1 = (DMA_BUFFER_LEN(pfvf->rbsize) / 8) - 1; aq->rq.xqe_imm_size = 0; /* Copying of packet to CQE not needed */ aq->rq.flow_tagw = 32; /* Copy full 32bit flow_tag to CQE header */ aq->rq.qint_idx = 0; aq->rq.lpb_drop_ena = 1; /* Enable RED dropping for AURA */ aq->rq.xqe_drop_ena = 1; /* Enable RED dropping for CQ/SSO */ aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); aq->rq.lpb_aura_pass = RQ_PASS_LVL_AURA; aq->rq.lpb_aura_drop = RQ_DROP_LVL_AURA; /* Fill AQ info */ aq->qidx = qidx; aq->ctype = NIX_AQ_CTYPE_RQ; aq->op = NIX_AQ_INSTOP_INIT; return otx2_sync_mbox_msg(&pfvf->mbox); } int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) { struct otx2_nic *pfvf = dev; struct otx2_snd_queue *sq; struct nix_aq_enq_req *aq; sq = &pfvf->qset.sq[qidx]; sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx)); /* Get memory to put this msg */ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); if (!aq) return -ENOMEM; aq->sq.cq = pfvf->hw.rx_queues + qidx; aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */ aq->sq.cq_ena = 1; aq->sq.ena = 1; aq->sq.smq = otx2_get_smq_idx(pfvf, qidx); aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); aq->sq.default_chan = pfvf->hw.tx_chan_base; aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ aq->sq.sqb_aura = sqb_aura; aq->sq.sq_int_ena = NIX_SQINT_BITS; aq->sq.qint_idx = 0; /* Due pipelining impact minimum 2000 unused SQ CQE's * need to maintain to avoid CQ overflow. 
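* The cq_limit value below encodes that skid as SEND_CQ_SKID * 256 / sqe_cnt, the same 1/256th-of-queue-size scaling used for the CQ pass/drop levels defined above.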
*/ aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt)); /* Fill AQ info */ aq->qidx = qidx; aq->ctype = NIX_AQ_CTYPE_SQ; aq->op = NIX_AQ_INSTOP_INIT; return otx2_sync_mbox_msg(&pfvf->mbox); } int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) { struct otx2_qset *qset = &pfvf->qset; struct otx2_snd_queue *sq; struct otx2_pool *pool; int err; pool = &pfvf->qset.pool[sqb_aura]; sq = &qset->sq[qidx]; sq->sqe_size = NIX_SQESZ_W16 ? 64 : 128; sq->sqe_cnt = qset->sqe_cnt; err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size); if (err) return err; if (qidx < pfvf->hw.tx_queues) { err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt, TSO_HEADER_SIZE); if (err) return err; } sq->sqe_base = sq->sqe->base; sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL); if (!sq->sg) return -ENOMEM; if (pfvf->ptp && qidx < pfvf->hw.tx_queues) { err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt, sizeof(*sq->timestamps)); if (err) return err; } sq->head = 0; sq->cons_head = 0; sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1; sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb; /* Set SQE threshold to 10% of total SQEs */ sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100; sq->aura_id = sqb_aura; sq->aura_fc_addr = pool->fc_addr->base; sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0)); sq->stats.bytes = 0; sq->stats.pkts = 0; return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura); } static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) { struct otx2_qset *qset = &pfvf->qset; int err, pool_id, non_xdp_queues; struct nix_aq_enq_req *aq; struct otx2_cq_queue *cq; cq = &qset->cq[qidx]; cq->cq_idx = qidx; non_xdp_queues = pfvf->hw.rx_queues + pfvf->hw.tx_queues; if (qidx < pfvf->hw.rx_queues) { cq->cq_type = CQ_RX; cq->cint_idx = qidx; cq->cqe_cnt = qset->rqe_cnt; if (pfvf->xdp_prog) xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0); } else if (qidx < non_xdp_queues) { cq->cq_type = CQ_TX; cq->cint_idx = qidx - pfvf->hw.rx_queues; cq->cqe_cnt = qset->sqe_cnt; } else { if (pfvf->hw.xdp_queues && qidx < non_xdp_queues + pfvf->hw.xdp_queues) { cq->cq_type = CQ_XDP; cq->cint_idx = qidx - non_xdp_queues; cq->cqe_cnt = qset->sqe_cnt; } else { cq->cq_type = CQ_QOS; cq->cint_idx = qidx - non_xdp_queues - pfvf->hw.xdp_queues; cq->cqe_cnt = qset->sqe_cnt; } } cq->cqe_size = pfvf->qset.xqe_size; /* Allocate memory for CQEs */ err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size); if (err) return err; /* Save CQE CPU base for faster reference */ cq->cqe_base = cq->cqe->base; /* In case where all RQs auras point to single pool, * all CQs receive buffer pool also point to same pool. */ pool_id = ((cq->cq_type == CQ_RX) && (pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 
0 : qidx; cq->rbpool = &qset->pool[pool_id]; cq->refill_task_sched = false; /* Get memory to put this msg */ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); if (!aq) return -ENOMEM; aq->cq.ena = 1; aq->cq.qsize = Q_SIZE(cq->cqe_cnt, 4); aq->cq.caching = 1; aq->cq.base = cq->cqe->iova; aq->cq.cint_idx = cq->cint_idx; aq->cq.cq_err_int_ena = NIX_CQERRINT_BITS; aq->cq.qint_idx = 0; aq->cq.avg_level = 255; if (qidx < pfvf->hw.rx_queues) { aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt); aq->cq.drop_ena = 1; if (!is_otx2_lbkvf(pfvf->pdev)) { /* Enable receive CQ backpressure */ aq->cq.bp_ena = 1; #ifdef CONFIG_DCB aq->cq.bpid = pfvf->bpid[pfvf->queue_to_pfc_map[qidx]]; #else aq->cq.bpid = pfvf->bpid[0]; #endif /* Set backpressure level is same as cq pass level */ aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); } } /* Fill AQ info */ aq->qidx = qidx; aq->ctype = NIX_AQ_CTYPE_CQ; aq->op = NIX_AQ_INSTOP_INIT; return otx2_sync_mbox_msg(&pfvf->mbox); } static void otx2_pool_refill_task(struct work_struct *work) { struct otx2_cq_queue *cq; struct refill_work *wrk; struct otx2_nic *pfvf; int qidx; wrk = container_of(work, struct refill_work, pool_refill_work.work); pfvf = wrk->pf; qidx = wrk - pfvf->refill_wrk; cq = &pfvf->qset.cq[qidx]; cq->refill_task_sched = false; local_bh_disable(); napi_schedule(wrk->napi); local_bh_enable(); } int otx2_config_nix_queues(struct otx2_nic *pfvf) { int qidx, err; /* Initialize RX queues */ for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) { u16 lpb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx); err = otx2_rq_init(pfvf, qidx, lpb_aura); if (err) return err; } /* Initialize TX queues */ for (qidx = 0; qidx < pfvf->hw.non_qos_queues; qidx++) { u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); err = otx2_sq_init(pfvf, qidx, sqb_aura); if (err) return err; } /* Initialize completion queues */ for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) { err = otx2_cq_init(pfvf, qidx); if (err) return err; } pfvf->cq_op_addr = (__force u64 *)otx2_get_regaddr(pfvf, NIX_LF_CQ_OP_STATUS); /* Initialize work queue for receive buffer refill */ pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt, sizeof(struct refill_work), GFP_KERNEL); if (!pfvf->refill_wrk) return -ENOMEM; for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) { pfvf->refill_wrk[qidx].pf = pfvf; INIT_DELAYED_WORK(&pfvf->refill_wrk[qidx].pool_refill_work, otx2_pool_refill_task); } return 0; } int otx2_config_nix(struct otx2_nic *pfvf) { struct nix_lf_alloc_req *nixlf; struct nix_lf_alloc_rsp *rsp; int err; pfvf->qset.xqe_size = pfvf->hw.xqe_size; /* Get memory to put this msg */ nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox); if (!nixlf) return -ENOMEM; /* Set RQ/SQ/CQ counts */ nixlf->rq_cnt = pfvf->hw.rx_queues; nixlf->sq_cnt = otx2_get_total_tx_queues(pfvf); nixlf->cq_cnt = pfvf->qset.cq_cnt; nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE; nixlf->rss_grps = MAX_RSS_GROUPS; nixlf->xqe_sz = pfvf->hw.xqe_size == 128 ? NIX_XQESZ_W16 : NIX_XQESZ_W64; /* We don't know absolute NPA LF idx attached. * AF will replace 'RVU_DEFAULT_PF_FUNC' with * NPA LF attached to this RVU PF/VF. */ nixlf->npa_func = RVU_DEFAULT_PF_FUNC; /* Disable alignment pad, enable L2 length check, * enable L4 TCP/UDP checksum verification. 
*/ nixlf->rx_cfg = BIT_ULL(33) | BIT_ULL(35) | BIT_ULL(37); err = otx2_sync_mbox_msg(&pfvf->mbox); if (err) return err; rsp = (struct nix_lf_alloc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &nixlf->hdr); if (IS_ERR(rsp)) return PTR_ERR(rsp); if (rsp->qints < 1) return -ENXIO; return rsp->hdr.rc; } void otx2_sq_free_sqbs(struct otx2_nic *pfvf) { struct otx2_qset *qset = &pfvf->qset; struct otx2_hw *hw = &pfvf->hw; struct otx2_snd_queue *sq; int sqb, qidx; u64 iova, pa; for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) { sq = &qset->sq[qidx]; if (!sq->sqb_ptrs) continue; for (sqb = 0; sqb < sq->sqb_count; sqb++) { if (!sq->sqb_ptrs[sqb]) continue; iova = sq->sqb_ptrs[sqb]; pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); put_page(virt_to_page(phys_to_virt(pa))); } sq->sqb_count = 0; } } void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool, u64 iova, int size) { struct page *page; u64 pa; pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); page = virt_to_head_page(phys_to_virt(pa)); if (pool->page_pool) { page_pool_put_full_page(pool->page_pool, page, true); } else { dma_unmap_page_attrs(pfvf->dev, iova, size, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); put_page(page); } } void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type) { int pool_id, pool_start = 0, pool_end = 0, size = 0; struct otx2_pool *pool; u64 iova; if (type == AURA_NIX_SQ) { pool_start = otx2_get_pool_idx(pfvf, type, 0); pool_end = pool_start + pfvf->hw.sqpool_cnt; size = pfvf->hw.sqb_size; } if (type == AURA_NIX_RQ) { pool_start = otx2_get_pool_idx(pfvf, type, 0); pool_end = pfvf->hw.rqpool_cnt; size = pfvf->rbsize; } /* Free SQB and RQB pointers from the aura pool */ for (pool_id = pool_start; pool_id < pool_end; pool_id++) { iova = otx2_aura_allocptr(pfvf, pool_id); pool = &pfvf->qset.pool[pool_id]; while (iova) { if (type == AURA_NIX_RQ) iova -= OTX2_HEAD_ROOM; otx2_free_bufs(pfvf, pool, iova, size); iova = otx2_aura_allocptr(pfvf, pool_id); } } } void otx2_aura_pool_free(struct otx2_nic *pfvf) { struct otx2_pool *pool; int pool_id; if (!pfvf->qset.pool) return; for (pool_id = 0; pool_id < pfvf->hw.pool_cnt; pool_id++) { pool = &pfvf->qset.pool[pool_id]; qmem_free(pfvf->dev, pool->stack); qmem_free(pfvf->dev, pool->fc_addr); page_pool_destroy(pool->page_pool); pool->page_pool = NULL; } devm_kfree(pfvf->dev, pfvf->qset.pool); pfvf->qset.pool = NULL; } int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, int pool_id, int numptrs) { struct npa_aq_enq_req *aq; struct otx2_pool *pool; int err; pool = &pfvf->qset.pool[pool_id]; /* Allocate memory for HW to update Aura count. * Alloc one cache line, so that it fits all FC_STYPE modes. 
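* fc_addr is programmed into the aura as aq->aura.fc_addr below so hardware can write the aura count here, and otx2_sq_init() keeps a CPU pointer to it as sq->aura_fc_addr.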
*/ if (!pool->fc_addr) { err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN); if (err) return err; } /* Initialize this aura's context via AF */ aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); if (!aq) { /* Shared mbox memory buffer is full, flush it and retry */ err = otx2_sync_mbox_msg(&pfvf->mbox); if (err) return err; aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); if (!aq) return -ENOMEM; } aq->aura_id = aura_id; /* Will be filled by AF with correct pool context address */ aq->aura.pool_addr = pool_id; aq->aura.pool_caching = 1; aq->aura.shift = ilog2(numptrs) - 8; aq->aura.count = numptrs; aq->aura.limit = numptrs; aq->aura.avg_level = 255; aq->aura.ena = 1; aq->aura.fc_ena = 1; aq->aura.fc_addr = pool->fc_addr->iova; aq->aura.fc_hyst_bits = 0; /* Store count on all updates */ /* Enable backpressure for RQ aura */ if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) { aq->aura.bp_ena = 0; /* If NIX1 LF is attached then specify NIX1_RX. * * Below NPA_AURA_S[BP_ENA] is set according to the * NPA_BPINTF_E enumeration given as: * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so * NIX0_RX is 0x0 + 0*0x1 = 0 * NIX1_RX is 0x0 + 1*0x1 = 1 * But in HRM it is given that * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to * NIX-RX based on [BP] level. One bit per NIX-RX; index * enumerated by NPA_BPINTF_E." */ if (pfvf->nix_blkaddr == BLKADDR_NIX1) aq->aura.bp_ena = 1; #ifdef CONFIG_DCB aq->aura.nix0_bpid = pfvf->bpid[pfvf->queue_to_pfc_map[aura_id]]; #else aq->aura.nix0_bpid = pfvf->bpid[0]; #endif /* Set backpressure level for RQ's Aura */ aq->aura.bp = RQ_BP_LVL_AURA; } /* Fill AQ info */ aq->ctype = NPA_AQ_CTYPE_AURA; aq->op = NPA_AQ_INSTOP_INIT; return 0; } int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, int stack_pages, int numptrs, int buf_size, int type) { struct page_pool_params pp_params = { 0 }; struct npa_aq_enq_req *aq; struct otx2_pool *pool; int err; pool = &pfvf->qset.pool[pool_id]; /* Alloc memory for stack which is used to store buffer pointers */ err = qmem_alloc(pfvf->dev, &pool->stack, stack_pages, pfvf->hw.stack_pg_bytes); if (err) return err; pool->rbsize = buf_size; /* Initialize this pool's context via AF */ aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); if (!aq) { /* Shared mbox memory buffer is full, flush it and retry */ err = otx2_sync_mbox_msg(&pfvf->mbox); if (err) { qmem_free(pfvf->dev, pool->stack); return err; } aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); if (!aq) { qmem_free(pfvf->dev, pool->stack); return -ENOMEM; } } aq->aura_id = pool_id; aq->pool.stack_base = pool->stack->iova; aq->pool.stack_caching = 1; aq->pool.ena = 1; aq->pool.buf_size = buf_size / 128; aq->pool.stack_max_pages = stack_pages; aq->pool.shift = ilog2(numptrs) - 8; aq->pool.ptr_start = 0; aq->pool.ptr_end = ~0ULL; /* Fill AQ info */ aq->ctype = NPA_AQ_CTYPE_POOL; aq->op = NPA_AQ_INSTOP_INIT; if (type != AURA_NIX_RQ) { pool->page_pool = NULL; return 0; } pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP; pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs); pp_params.nid = NUMA_NO_NODE; pp_params.dev = pfvf->dev; pp_params.dma_dir = DMA_FROM_DEVICE; pool->page_pool = page_pool_create(&pp_params); if (IS_ERR(pool->page_pool)) { netdev_err(pfvf->netdev, "Creation of page pool failed\n"); return PTR_ERR(pool->page_pool); } return 0; } int otx2_sq_aura_pool_init(struct otx2_nic *pfvf) { int qidx, pool_id, stack_pages, num_sqbs; struct otx2_qset *qset = &pfvf->qset; struct otx2_hw *hw = &pfvf->hw; struct otx2_snd_queue *sq; 
struct otx2_pool *pool; dma_addr_t bufptr; int err, ptr; /* Calculate number of SQBs needed. * * For a 128byte SQE, and 4K size SQB, 31 SQEs will fit in one SQB. * Last SQE is used for pointing to next SQB. */ num_sqbs = (hw->sqb_size / 128) - 1; num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs; /* Get no of stack pages needed */ stack_pages = (num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs; for (qidx = 0; qidx < hw->non_qos_queues; qidx++) { pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); /* Initialize aura context */ err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs); if (err) goto fail; /* Initialize pool context */ err = otx2_pool_init(pfvf, pool_id, stack_pages, num_sqbs, hw->sqb_size, AURA_NIX_SQ); if (err) goto fail; } /* Flush accumulated messages */ err = otx2_sync_mbox_msg(&pfvf->mbox); if (err) goto fail; /* Allocate pointers and free them to aura/pool */ for (qidx = 0; qidx < hw->non_qos_queues; qidx++) { pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); pool = &pfvf->qset.pool[pool_id]; sq = &qset->sq[qidx]; sq->sqb_count = 0; sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL); if (!sq->sqb_ptrs) { err = -ENOMEM; goto err_mem; } for (ptr = 0; ptr < num_sqbs; ptr++) { err = otx2_alloc_rbuf(pfvf, pool, &bufptr); if (err) goto err_mem; pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr); sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr; } } err_mem: return err ? -ENOMEM : 0; fail: otx2_mbox_reset(&pfvf->mbox.mbox, 0); otx2_aura_pool_free(pfvf); return err; } int otx2_rq_aura_pool_init(struct otx2_nic *pfvf) { struct otx2_hw *hw = &pfvf->hw; int stack_pages, pool_id, rq; struct otx2_pool *pool; int err, ptr, num_ptrs; dma_addr_t bufptr; num_ptrs = pfvf->qset.rqe_cnt; stack_pages = (num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs; for (rq = 0; rq < hw->rx_queues; rq++) { pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, rq); /* Initialize aura context */ err = otx2_aura_init(pfvf, pool_id, pool_id, num_ptrs); if (err) goto fail; } for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { err = otx2_pool_init(pfvf, pool_id, stack_pages, num_ptrs, pfvf->rbsize, AURA_NIX_RQ); if (err) goto fail; } /* Flush accumulated messages */ err = otx2_sync_mbox_msg(&pfvf->mbox); if (err) goto fail; /* Allocate pointers and free them to aura/pool */ for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { pool = &pfvf->qset.pool[pool_id]; for (ptr = 0; ptr < num_ptrs; ptr++) { err = otx2_alloc_rbuf(pfvf, pool, &bufptr); if (err) return -ENOMEM; pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr + OTX2_HEAD_ROOM); } } return 0; fail: otx2_mbox_reset(&pfvf->mbox.mbox, 0); otx2_aura_pool_free(pfvf); return err; } int otx2_config_npa(struct otx2_nic *pfvf) { struct otx2_qset *qset = &pfvf->qset; struct npa_lf_alloc_req *npalf; struct otx2_hw *hw = &pfvf->hw; int aura_cnt; /* Pool - Stack of free buffer pointers * Aura - Alloc/frees pointers from/to pool for NIX DMA. */ if (!hw->pool_cnt) return -EINVAL; qset->pool = devm_kcalloc(pfvf->dev, hw->pool_cnt, sizeof(struct otx2_pool), GFP_KERNEL); if (!qset->pool) return -ENOMEM; /* Get memory to put this msg */ npalf = otx2_mbox_alloc_msg_npa_lf_alloc(&pfvf->mbox); if (!npalf) return -ENOMEM; /* Set aura and pool counts */ npalf->nr_pools = hw->pool_cnt; aura_cnt = ilog2(roundup_pow_of_two(hw->pool_cnt)); npalf->aura_sz = (aura_cnt >= ilog2(128)) ? 
(aura_cnt - 6) : 1; return otx2_sync_mbox_msg(&pfvf->mbox); } int otx2_detach_resources(struct mbox *mbox) { struct rsrc_detach *detach; mutex_lock(&mbox->lock); detach = otx2_mbox_alloc_msg_detach_resources(mbox); if (!detach) { mutex_unlock(&mbox->lock); return -ENOMEM; } /* detach all */ detach->partial = false; /* Send detach request to AF */ otx2_mbox_msg_send(&mbox->mbox, 0); mutex_unlock(&mbox->lock); return 0; } EXPORT_SYMBOL(otx2_detach_resources); int otx2_attach_npa_nix(struct otx2_nic *pfvf) { struct rsrc_attach *attach; struct msg_req *msix; int err; mutex_lock(&pfvf->mbox.lock); /* Get memory to put this msg */ attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox); if (!attach) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } attach->npalf = true; attach->nixlf = true; /* Send attach request to AF */ err = otx2_sync_mbox_msg(&pfvf->mbox); if (err) { mutex_unlock(&pfvf->mbox.lock); return err; } pfvf->nix_blkaddr = BLKADDR_NIX0; /* If the platform has two NIX blocks then LF may be * allocated from NIX1. */ if (otx2_read64(pfvf, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_NIX1)) & 0x1FFULL) pfvf->nix_blkaddr = BLKADDR_NIX1; /* Get NPA and NIX MSIX vector offsets */ msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox); if (!msix) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } err = otx2_sync_mbox_msg(&pfvf->mbox); if (err) { mutex_unlock(&pfvf->mbox.lock); return err; } mutex_unlock(&pfvf->mbox.lock); if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID || pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) { dev_err(pfvf->dev, "RVUPF: Invalid MSIX vector offset for NPA/NIX\n"); return -EINVAL; } return 0; } EXPORT_SYMBOL(otx2_attach_npa_nix); void otx2_ctx_disable(struct mbox *mbox, int type, bool npa) { struct hwctx_disable_req *req; mutex_lock(&mbox->lock); /* Request AQ to disable this context */ if (npa) req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox); else req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox); if (!req) { mutex_unlock(&mbox->lock); return; } req->ctype = type; if (otx2_sync_mbox_msg(mbox)) dev_err(mbox->pfvf->dev, "%s failed to disable context\n", __func__); mutex_unlock(&mbox->lock); } int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable) { struct nix_bp_cfg_req *req; if (enable) req = otx2_mbox_alloc_msg_nix_bp_enable(&pfvf->mbox); else req = otx2_mbox_alloc_msg_nix_bp_disable(&pfvf->mbox); if (!req) return -ENOMEM; req->chan_base = 0; #ifdef CONFIG_DCB req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1; req->bpid_per_chan = pfvf->pfc_en ? 
1 : 0; #else req->chan_cnt = 1; req->bpid_per_chan = 0; #endif return otx2_sync_mbox_msg(&pfvf->mbox); } EXPORT_SYMBOL(otx2_nix_config_bp); /* Mbox message handlers */ void mbox_handler_cgx_stats(struct otx2_nic *pfvf, struct cgx_stats_rsp *rsp) { int id; for (id = 0; id < CGX_RX_STATS_COUNT; id++) pfvf->hw.cgx_rx_stats[id] = rsp->rx_stats[id]; for (id = 0; id < CGX_TX_STATS_COUNT; id++) pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id]; } void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf, struct cgx_fec_stats_rsp *rsp) { pfvf->hw.cgx_fec_corr_blks += rsp->fec_corr_blks; pfvf->hw.cgx_fec_uncorr_blks += rsp->fec_uncorr_blks; } void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf, struct npa_lf_alloc_rsp *rsp) { pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs; pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes; } EXPORT_SYMBOL(mbox_handler_npa_lf_alloc); void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf, struct nix_lf_alloc_rsp *rsp) { pfvf->hw.sqb_size = rsp->sqb_size; pfvf->hw.rx_chan_base = rsp->rx_chan_base; pfvf->hw.tx_chan_base = rsp->tx_chan_base; pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx; pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx; pfvf->hw.cgx_links = rsp->cgx_links; pfvf->hw.lbk_links = rsp->lbk_links; pfvf->hw.tx_link = rsp->tx_link; } EXPORT_SYMBOL(mbox_handler_nix_lf_alloc); void mbox_handler_msix_offset(struct otx2_nic *pfvf, struct msix_offset_rsp *rsp) { pfvf->hw.npa_msixoff = rsp->npa_msixoff; pfvf->hw.nix_msixoff = rsp->nix_msixoff; } EXPORT_SYMBOL(mbox_handler_msix_offset); void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf, struct nix_bp_cfg_rsp *rsp) { int chan, chan_id; for (chan = 0; chan < rsp->chan_cnt; chan++) { chan_id = ((rsp->chan_bpid[chan] >> 10) & 0x7F); pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF; } } EXPORT_SYMBOL(mbox_handler_nix_bp_enable); void otx2_free_cints(struct otx2_nic *pfvf, int n) { struct otx2_qset *qset = &pfvf->qset; struct otx2_hw *hw = &pfvf->hw; int irq, qidx; for (qidx = 0, irq = hw->nix_msixoff + NIX_LF_CINT_VEC_START; qidx < n; qidx++, irq++) { int vector = pci_irq_vector(pfvf->pdev, irq); irq_set_affinity_hint(vector, NULL); free_cpumask_var(hw->affinity_mask[irq]); free_irq(vector, &qset->napi[qidx]); } } void otx2_set_cints_affinity(struct otx2_nic *pfvf) { struct otx2_hw *hw = &pfvf->hw; int vec, cpu, irq, cint; vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START; cpu = cpumask_first(cpu_online_mask); /* CQ interrupts */ for (cint = 0; cint < pfvf->hw.cint_cnt; cint++, vec++) { if (!alloc_cpumask_var(&hw->affinity_mask[vec], GFP_KERNEL)) return; cpumask_set_cpu(cpu, hw->affinity_mask[vec]); irq = pci_irq_vector(pfvf->pdev, vec); irq_set_affinity_hint(irq, hw->affinity_mask[vec]); cpu = cpumask_next(cpu, cpu_online_mask); if (unlikely(cpu >= nr_cpu_ids)) cpu = 0; } } static u32 get_dwrr_mtu(struct otx2_nic *pfvf, struct nix_hw_info *hw) { if (is_otx2_lbkvf(pfvf->pdev)) { pfvf->hw.smq_link_type = SMQ_LINK_TYPE_LBK; return hw->lbk_dwrr_mtu; } pfvf->hw.smq_link_type = SMQ_LINK_TYPE_RPM; return hw->rpm_dwrr_mtu; } u16 otx2_get_max_mtu(struct otx2_nic *pfvf) { struct nix_hw_info *rsp; struct msg_req *req; u16 max_mtu; int rc; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_nix_get_hw_info(&pfvf->mbox); if (!req) { rc = -ENOMEM; goto out; } rc = otx2_sync_mbox_msg(&pfvf->mbox); if (!rc) { rsp = (struct nix_hw_info *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); /* HW counts VLAN insertion bytes (8 for double tag) * irrespective of whether SQE is requesting to insert VLAN * in the packet or not. 
Hence these 8 bytes have to be * discounted from max packet size otherwise HW will throw * SMQ errors */ max_mtu = rsp->max_mtu - 8 - OTX2_ETH_HLEN; /* Also save DWRR MTU, needed for DWRR weight calculation */ pfvf->hw.dwrr_mtu = get_dwrr_mtu(pfvf, rsp); if (!pfvf->hw.dwrr_mtu) pfvf->hw.dwrr_mtu = 1; } out: mutex_unlock(&pfvf->mbox.lock); if (rc) { dev_warn(pfvf->dev, "Failed to get MTU from hardware setting default value(1500)\n"); max_mtu = 1500; } return max_mtu; } EXPORT_SYMBOL(otx2_get_max_mtu); int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features) { netdev_features_t changed = features ^ netdev->features; struct otx2_nic *pfvf = netdev_priv(netdev); bool ntuple = !!(features & NETIF_F_NTUPLE); bool tc = !!(features & NETIF_F_HW_TC); if ((changed & NETIF_F_NTUPLE) && !ntuple) otx2_destroy_ntuple_flows(pfvf); if ((changed & NETIF_F_NTUPLE) && ntuple) { if (!pfvf->flow_cfg->max_flows) { netdev_err(netdev, "Can't enable NTUPLE, MCAM entries not allocated\n"); return -EINVAL; } } if ((changed & NETIF_F_HW_TC) && !tc && otx2_tc_flower_rule_cnt(pfvf)) { netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n"); return -EBUSY; } if ((changed & NETIF_F_NTUPLE) && ntuple && otx2_tc_flower_rule_cnt(pfvf) && !(changed & NETIF_F_HW_TC)) { netdev_err(netdev, "Can't enable NTUPLE when TC flower offload is active, disable TC rules and retry\n"); return -EINVAL; } return 0; } EXPORT_SYMBOL(otx2_handle_ntuple_tc_features); #define M(_name, _id, _fn_name, _req_type, _rsp_type) \ int __weak \ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \ struct _req_type *req, \ struct _rsp_type *rsp) \ { \ /* Nothing to do here */ \ return 0; \ } \ EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name); MBOX_UP_CGX_MESSAGES MBOX_UP_MCS_MESSAGES #undef M
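The CQ and aura pass/drop thresholds used above are all encoded in 1/256th-of-queue-size units. The stand-alone sketch below is illustrative only and is not part of the driver: it copies the macro bodies from this file and prints the resulting levels for an assumed skid of 600 and an assumed 1K-entry receive queue, so the scaling is easy to verify.

#include <stdio.h>

/* Macro bodies copied from otx2_common.c above */
#define RQ_PASS_LVL_CQ(skid, qsize)	((((skid) + 16) * 256) / (qsize))
#define RQ_DROP_LVL_CQ(skid, qsize)	(((skid) * 256) / (qsize))
#define RQ_BP_LVL_AURA		(255 - ((85 * 256) / 100))	/* BP when 85% is full */
#define RQ_PASS_LVL_AURA	(255 - ((95 * 256) / 100))	/* RED when 95% is full */
#define RQ_DROP_LVL_AURA	(255 - ((99 * 256) / 100))	/* Drop when 99% is full */

int main(void)
{
	int rq_skid = 600;	/* assumed example skid, not the driver default */
	int rqe_cnt = 1024;	/* assumed 1K-entry RQ/CQ */

	/* CQ levels are a measure of emptiness: 0 = full, 255 = empty */
	printf("CQ   xqe_pass=%d xqe_drop=%d\n",
	       RQ_PASS_LVL_CQ(rq_skid, rqe_cnt),
	       RQ_DROP_LVL_CQ(rq_skid, rqe_cnt));

	/* Aura levels are a measure of fullness: 0 = empty, 255 = full */
	printf("AURA bp=%d pass=%d drop=%d\n",
	       RQ_BP_LVL_AURA, RQ_PASS_LVL_AURA, RQ_DROP_LVL_AURA);

	return 0;
}

With these assumed inputs the CQ pass/drop levels come out to 154/150 and the aura bp/pass/drop levels to 38/12/2, matching the percentages noted in the macro comments.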
linux-master
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Ethernet driver * * Copyright (C) 2020 Marvell. * */ #include <net/ipv6.h> #include <linux/sort.h> #include "otx2_common.h" #define OTX2_DEFAULT_ACTION 0x1 static int otx2_mcam_entry_init(struct otx2_nic *pfvf); struct otx2_flow { struct ethtool_rx_flow_spec flow_spec; struct list_head list; u32 location; u32 entry; bool is_vf; u8 rss_ctx_id; #define DMAC_FILTER_RULE BIT(0) #define PFC_FLOWCTRL_RULE BIT(1) u16 rule_type; int vf; }; enum dmac_req { DMAC_ADDR_UPDATE, DMAC_ADDR_DEL }; static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg) { devm_kfree(pfvf->dev, flow_cfg->flow_ent); flow_cfg->flow_ent = NULL; flow_cfg->max_flows = 0; } static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct npc_mcam_free_entry_req *req; int ent, err; if (!flow_cfg->max_flows) return 0; mutex_lock(&pfvf->mbox.lock); for (ent = 0; ent < flow_cfg->max_flows; ent++) { req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox); if (!req) break; req->entry = flow_cfg->flow_ent[ent]; /* Send message to AF to free MCAM entries */ err = otx2_sync_mbox_msg(&pfvf->mbox); if (err) break; } mutex_unlock(&pfvf->mbox.lock); otx2_clear_ntuple_flow_info(pfvf, flow_cfg); return 0; } static int mcam_entry_cmp(const void *a, const void *b) { return *(u16 *)a - *(u16 *)b; } int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct npc_mcam_alloc_entry_req *req; struct npc_mcam_alloc_entry_rsp *rsp; int ent, allocated = 0; /* Free current ones and allocate new ones with requested count */ otx2_free_ntuple_mcam_entries(pfvf); if (!count) return 0; flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count, sizeof(u16), GFP_KERNEL); if (!flow_cfg->flow_ent) { netdev_err(pfvf->netdev, "%s: Unable to allocate memory for flow entries\n", __func__); return -ENOMEM; } mutex_lock(&pfvf->mbox.lock); /* In a single request a max of NPC_MAX_NONCONTIG_ENTRIES MCAM entries * can only be allocated. */ while (allocated < count) { req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox); if (!req) goto exit; req->contig = false; req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ? NPC_MAX_NONCONTIG_ENTRIES : count - allocated; /* Allocate higher priority entries for PFs, so that VF's entries * will be on top of PF. */ if (!is_otx2_vf(pfvf->pcifunc)) { req->priority = NPC_MCAM_HIGHER_PRIO; req->ref_entry = flow_cfg->def_ent[0]; } /* Send message to AF */ if (otx2_sync_mbox_msg(&pfvf->mbox)) goto exit; rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp (&pfvf->mbox.mbox, 0, &req->hdr); for (ent = 0; ent < rsp->count; ent++) flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent]; allocated += rsp->count; /* If this request is not fulfilled, no need to send * further requests. */ if (rsp->count != req->count) break; } /* Multiple MCAM entry alloc requests could result in non-sequential * MCAM entries in the flow_ent[] array. Sort them in an ascending order, * otherwise user installed ntuple filter index and MCAM entry index will * not be in sync. 
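* (mcam_entry_cmp() above provides the ascending u16 comparison used by sort() below.)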
*/ if (allocated) sort(&flow_cfg->flow_ent[0], allocated, sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL); exit: mutex_unlock(&pfvf->mbox.lock); flow_cfg->max_flows = allocated; if (allocated) { pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC; pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT; } if (allocated != count) netdev_info(pfvf->netdev, "Unable to allocate %d MCAM entries, got only %d\n", count, allocated); return allocated; } EXPORT_SYMBOL(otx2_alloc_mcam_entries); static int otx2_mcam_entry_init(struct otx2_nic *pfvf) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct npc_get_field_status_req *freq; struct npc_get_field_status_rsp *frsp; struct npc_mcam_alloc_entry_req *req; struct npc_mcam_alloc_entry_rsp *rsp; int vf_vlan_max_flows; int ent, count; vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS; count = OTX2_MAX_UNICAST_FLOWS + OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows; flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count, sizeof(u16), GFP_KERNEL); if (!flow_cfg->def_ent) return -ENOMEM; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox); if (!req) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } req->contig = false; req->count = count; /* Send message to AF */ if (otx2_sync_mbox_msg(&pfvf->mbox)) { mutex_unlock(&pfvf->mbox.lock); return -EINVAL; } rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp (&pfvf->mbox.mbox, 0, &req->hdr); if (rsp->count != req->count) { netdev_info(pfvf->netdev, "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n"); mutex_unlock(&pfvf->mbox.lock); devm_kfree(pfvf->dev, flow_cfg->def_ent); return 0; } for (ent = 0; ent < rsp->count; ent++) flow_cfg->def_ent[ent] = rsp->entry_list[ent]; flow_cfg->vf_vlan_offset = 0; flow_cfg->unicast_offset = vf_vlan_max_flows; flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset + OTX2_MAX_UNICAST_FLOWS; pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT; /* Check if NPC_DMAC field is supported * by the mkex profile before setting VLAN support flag. 
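* (The Rx VLAN offload entry installed later matches on NPC_OUTER_VID plus NPC_DMAC, so VLAN support is only advertised when DMAC extraction is enabled in the profile.)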
*/ freq = otx2_mbox_alloc_msg_npc_get_field_status(&pfvf->mbox); if (!freq) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } freq->field = NPC_DMAC; if (otx2_sync_mbox_msg(&pfvf->mbox)) { mutex_unlock(&pfvf->mbox.lock); return -EINVAL; } frsp = (struct npc_get_field_status_rsp *)otx2_mbox_get_rsp (&pfvf->mbox.mbox, 0, &freq->hdr); if (frsp->enable) { pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT; pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT; } pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC; mutex_unlock(&pfvf->mbox.lock); /* Allocate entries for Ntuple filters */ count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT); if (count <= 0) { otx2_clear_ntuple_flow_info(pfvf, flow_cfg); return 0; } pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT; return 0; } /* TODO : revisit on size */ #define OTX2_DMAC_FLTR_BITMAP_SZ (4 * 2048 + 32) int otx2vf_mcam_flow_init(struct otx2_nic *pfvf) { struct otx2_flow_config *flow_cfg; pfvf->flow_cfg = devm_kzalloc(pfvf->dev, sizeof(struct otx2_flow_config), GFP_KERNEL); if (!pfvf->flow_cfg) return -ENOMEM; pfvf->flow_cfg->dmacflt_bmap = devm_kcalloc(pfvf->dev, BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ), sizeof(long), GFP_KERNEL); if (!pfvf->flow_cfg->dmacflt_bmap) return -ENOMEM; flow_cfg = pfvf->flow_cfg; INIT_LIST_HEAD(&flow_cfg->flow_list); INIT_LIST_HEAD(&flow_cfg->flow_list_tc); flow_cfg->max_flows = 0; return 0; } EXPORT_SYMBOL(otx2vf_mcam_flow_init); int otx2_mcam_flow_init(struct otx2_nic *pf) { int err; pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config), GFP_KERNEL); if (!pf->flow_cfg) return -ENOMEM; pf->flow_cfg->dmacflt_bmap = devm_kcalloc(pf->dev, BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ), sizeof(long), GFP_KERNEL); if (!pf->flow_cfg->dmacflt_bmap) return -ENOMEM; INIT_LIST_HEAD(&pf->flow_cfg->flow_list); INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc); /* Allocate bare minimum number of MCAM entries needed for * unicast and ntuple filters. 
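* (otx2_mcam_entry_init() below first reserves the unicast/VLAN/VF-VLAN default entries and then calls otx2_alloc_mcam_entries() for the ntuple pool.)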
*/ err = otx2_mcam_entry_init(pf); if (err) return err; /* Check if MCAM entries are allocate or not */ if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)) return 0; pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table) * OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL); if (!pf->mac_table) return -ENOMEM; otx2_dmacflt_get_max_cnt(pf); /* DMAC filters are not allocated */ if (!pf->flow_cfg->dmacflt_max_flows) return 0; pf->flow_cfg->bmap_to_dmacindex = devm_kzalloc(pf->dev, sizeof(u32) * pf->flow_cfg->dmacflt_max_flows, GFP_KERNEL); if (!pf->flow_cfg->bmap_to_dmacindex) return -ENOMEM; pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT; return 0; } void otx2_mcam_flow_del(struct otx2_nic *pf) { otx2_destroy_mcam_flows(pf); } EXPORT_SYMBOL(otx2_mcam_flow_del); /* On success adds mcam entry * On failure enable promisous mode */ static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac) { struct otx2_flow_config *flow_cfg = pf->flow_cfg; struct npc_install_flow_req *req; int err, i; if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)) return -ENOMEM; /* dont have free mcam entries or uc list is greater than alloted */ if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS) return -ENOMEM; mutex_lock(&pf->mbox.lock); req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox); if (!req) { mutex_unlock(&pf->mbox.lock); return -ENOMEM; } /* unicast offset starts with 32 0..31 for ntuple */ for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) { if (pf->mac_table[i].inuse) continue; ether_addr_copy(pf->mac_table[i].addr, mac); pf->mac_table[i].inuse = true; pf->mac_table[i].mcam_entry = flow_cfg->def_ent[i + flow_cfg->unicast_offset]; req->entry = pf->mac_table[i].mcam_entry; break; } ether_addr_copy(req->packet.dmac, mac); eth_broadcast_addr((u8 *)&req->mask.dmac); req->features = BIT_ULL(NPC_DMAC); req->channel = pf->hw.rx_chan_base; req->intf = NIX_INTF_RX; req->op = NIX_RX_ACTION_DEFAULT; req->set_cntr = 1; err = otx2_sync_mbox_msg(&pf->mbox); mutex_unlock(&pf->mbox.lock); return err; } int otx2_add_macfilter(struct net_device *netdev, const u8 *mac) { struct otx2_nic *pf = netdev_priv(netdev); if (!bitmap_empty(pf->flow_cfg->dmacflt_bmap, pf->flow_cfg->dmacflt_max_flows)) netdev_warn(netdev, "Add %pM to CGX/RPM DMAC filters list as well\n", mac); return otx2_do_add_macfilter(pf, mac); } static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac, int *mcam_entry) { int i; for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) { if (!pf->mac_table[i].inuse) continue; if (ether_addr_equal(pf->mac_table[i].addr, mac)) { *mcam_entry = pf->mac_table[i].mcam_entry; pf->mac_table[i].inuse = false; return true; } } return false; } int otx2_del_macfilter(struct net_device *netdev, const u8 *mac) { struct otx2_nic *pf = netdev_priv(netdev); struct npc_delete_flow_req *req; int err, mcam_entry; /* check does mcam entry exists for given mac */ if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry)) return 0; mutex_lock(&pf->mbox.lock); req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox); if (!req) { mutex_unlock(&pf->mbox.lock); return -ENOMEM; } req->entry = mcam_entry; /* Send message to AF */ err = otx2_sync_mbox_msg(&pf->mbox); mutex_unlock(&pf->mbox.lock); return err; } static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location) { struct otx2_flow *iter; list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) { if (iter->location == location) return iter; } return NULL; } static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow) { struct list_head *head = 
&pfvf->flow_cfg->flow_list; struct otx2_flow *iter; list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) { if (iter->location > flow->location) break; head = &iter->list; } list_add(&flow->list, head); } int otx2_get_maxflows(struct otx2_flow_config *flow_cfg) { if (!flow_cfg) return 0; if (flow_cfg->nr_flows == flow_cfg->max_flows || !bitmap_empty(flow_cfg->dmacflt_bmap, flow_cfg->dmacflt_max_flows)) return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows; else return flow_cfg->max_flows; } EXPORT_SYMBOL(otx2_get_maxflows); int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, u32 location) { struct otx2_flow *iter; if (location >= otx2_get_maxflows(pfvf->flow_cfg)) return -EINVAL; list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) { if (iter->location == location) { nfc->fs = iter->flow_spec; nfc->rss_context = iter->rss_ctx_id; return 0; } } return -ENOENT; } int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, u32 *rule_locs) { u32 rule_cnt = nfc->rule_cnt; u32 location = 0; int idx = 0; int err = 0; nfc->data = otx2_get_maxflows(pfvf->flow_cfg); while ((!err || err == -ENOENT) && idx < rule_cnt) { err = otx2_get_flow(pfvf, nfc, location); if (!err) rule_locs[idx++] = location; location++; } nfc->rule_cnt = rule_cnt; return err; } static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp, struct npc_install_flow_req *req, u32 flow_type) { struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec; struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec; struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec; struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec; struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec; struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec; struct flow_msg *pmask = &req->mask; struct flow_msg *pkt = &req->packet; switch (flow_type) { case IP_USER_FLOW: if (ipv4_usr_mask->ip4src) { memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src, sizeof(pkt->ip4src)); memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src, sizeof(pmask->ip4src)); req->features |= BIT_ULL(NPC_SIP_IPV4); } if (ipv4_usr_mask->ip4dst) { memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst, sizeof(pkt->ip4dst)); memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst, sizeof(pmask->ip4dst)); req->features |= BIT_ULL(NPC_DIP_IPV4); } if (ipv4_usr_mask->tos) { pkt->tos = ipv4_usr_hdr->tos; pmask->tos = ipv4_usr_mask->tos; req->features |= BIT_ULL(NPC_TOS); } if (ipv4_usr_mask->proto) { switch (ipv4_usr_hdr->proto) { case IPPROTO_ICMP: req->features |= BIT_ULL(NPC_IPPROTO_ICMP); break; case IPPROTO_TCP: req->features |= BIT_ULL(NPC_IPPROTO_TCP); break; case IPPROTO_UDP: req->features |= BIT_ULL(NPC_IPPROTO_UDP); break; case IPPROTO_SCTP: req->features |= BIT_ULL(NPC_IPPROTO_SCTP); break; case IPPROTO_AH: req->features |= BIT_ULL(NPC_IPPROTO_AH); break; case IPPROTO_ESP: req->features |= BIT_ULL(NPC_IPPROTO_ESP); break; default: return -EOPNOTSUPP; } } pkt->etype = cpu_to_be16(ETH_P_IP); pmask->etype = cpu_to_be16(0xFFFF); req->features |= BIT_ULL(NPC_ETYPE); break; case TCP_V4_FLOW: case UDP_V4_FLOW: case SCTP_V4_FLOW: pkt->etype = cpu_to_be16(ETH_P_IP); pmask->etype = cpu_to_be16(0xFFFF); req->features |= BIT_ULL(NPC_ETYPE); if (ipv4_l4_mask->ip4src) { memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src, sizeof(pkt->ip4src)); memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src, sizeof(pmask->ip4src)); req->features |= BIT_ULL(NPC_SIP_IPV4); } if (ipv4_l4_mask->ip4dst) { memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst, 
sizeof(pkt->ip4dst)); memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst, sizeof(pmask->ip4dst)); req->features |= BIT_ULL(NPC_DIP_IPV4); } if (ipv4_l4_mask->tos) { pkt->tos = ipv4_l4_hdr->tos; pmask->tos = ipv4_l4_mask->tos; req->features |= BIT_ULL(NPC_TOS); } if (ipv4_l4_mask->psrc) { memcpy(&pkt->sport, &ipv4_l4_hdr->psrc, sizeof(pkt->sport)); memcpy(&pmask->sport, &ipv4_l4_mask->psrc, sizeof(pmask->sport)); if (flow_type == UDP_V4_FLOW) req->features |= BIT_ULL(NPC_SPORT_UDP); else if (flow_type == TCP_V4_FLOW) req->features |= BIT_ULL(NPC_SPORT_TCP); else req->features |= BIT_ULL(NPC_SPORT_SCTP); } if (ipv4_l4_mask->pdst) { memcpy(&pkt->dport, &ipv4_l4_hdr->pdst, sizeof(pkt->dport)); memcpy(&pmask->dport, &ipv4_l4_mask->pdst, sizeof(pmask->dport)); if (flow_type == UDP_V4_FLOW) req->features |= BIT_ULL(NPC_DPORT_UDP); else if (flow_type == TCP_V4_FLOW) req->features |= BIT_ULL(NPC_DPORT_TCP); else req->features |= BIT_ULL(NPC_DPORT_SCTP); } if (flow_type == UDP_V4_FLOW) req->features |= BIT_ULL(NPC_IPPROTO_UDP); else if (flow_type == TCP_V4_FLOW) req->features |= BIT_ULL(NPC_IPPROTO_TCP); else req->features |= BIT_ULL(NPC_IPPROTO_SCTP); break; case AH_V4_FLOW: case ESP_V4_FLOW: pkt->etype = cpu_to_be16(ETH_P_IP); pmask->etype = cpu_to_be16(0xFFFF); req->features |= BIT_ULL(NPC_ETYPE); if (ah_esp_mask->ip4src) { memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src, sizeof(pkt->ip4src)); memcpy(&pmask->ip4src, &ah_esp_mask->ip4src, sizeof(pmask->ip4src)); req->features |= BIT_ULL(NPC_SIP_IPV4); } if (ah_esp_mask->ip4dst) { memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst, sizeof(pkt->ip4dst)); memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst, sizeof(pmask->ip4dst)); req->features |= BIT_ULL(NPC_DIP_IPV4); } if (ah_esp_mask->tos) { pkt->tos = ah_esp_hdr->tos; pmask->tos = ah_esp_mask->tos; req->features |= BIT_ULL(NPC_TOS); } /* NPC profile doesn't extract AH/ESP header fields */ if (ah_esp_mask->spi & ah_esp_hdr->spi) return -EOPNOTSUPP; if (flow_type == AH_V4_FLOW) req->features |= BIT_ULL(NPC_IPPROTO_AH); else req->features |= BIT_ULL(NPC_IPPROTO_ESP); break; default: break; } return 0; } static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp, struct npc_install_flow_req *req, u32 flow_type) { struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec; struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec; struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec; struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec; struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec; struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec; struct flow_msg *pmask = &req->mask; struct flow_msg *pkt = &req->packet; switch (flow_type) { case IPV6_USER_FLOW: if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) { memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src, sizeof(pkt->ip6src)); memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src, sizeof(pmask->ip6src)); req->features |= BIT_ULL(NPC_SIP_IPV6); } if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) { memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst, sizeof(pkt->ip6dst)); memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst, sizeof(pmask->ip6dst)); req->features |= BIT_ULL(NPC_DIP_IPV6); } if (ipv6_usr_hdr->l4_proto == IPPROTO_FRAGMENT) { pkt->next_header = ipv6_usr_hdr->l4_proto; pmask->next_header = ipv6_usr_mask->l4_proto; req->features |= BIT_ULL(NPC_IPFRAG_IPV6); } pkt->etype = cpu_to_be16(ETH_P_IPV6); pmask->etype = cpu_to_be16(0xFFFF); req->features |= BIT_ULL(NPC_ETYPE); break; case TCP_V6_FLOW: case 
UDP_V6_FLOW: case SCTP_V6_FLOW: pkt->etype = cpu_to_be16(ETH_P_IPV6); pmask->etype = cpu_to_be16(0xFFFF); req->features |= BIT_ULL(NPC_ETYPE); if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) { memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src, sizeof(pkt->ip6src)); memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src, sizeof(pmask->ip6src)); req->features |= BIT_ULL(NPC_SIP_IPV6); } if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) { memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst, sizeof(pkt->ip6dst)); memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst, sizeof(pmask->ip6dst)); req->features |= BIT_ULL(NPC_DIP_IPV6); } if (ipv6_l4_mask->psrc) { memcpy(&pkt->sport, &ipv6_l4_hdr->psrc, sizeof(pkt->sport)); memcpy(&pmask->sport, &ipv6_l4_mask->psrc, sizeof(pmask->sport)); if (flow_type == UDP_V6_FLOW) req->features |= BIT_ULL(NPC_SPORT_UDP); else if (flow_type == TCP_V6_FLOW) req->features |= BIT_ULL(NPC_SPORT_TCP); else req->features |= BIT_ULL(NPC_SPORT_SCTP); } if (ipv6_l4_mask->pdst) { memcpy(&pkt->dport, &ipv6_l4_hdr->pdst, sizeof(pkt->dport)); memcpy(&pmask->dport, &ipv6_l4_mask->pdst, sizeof(pmask->dport)); if (flow_type == UDP_V6_FLOW) req->features |= BIT_ULL(NPC_DPORT_UDP); else if (flow_type == TCP_V6_FLOW) req->features |= BIT_ULL(NPC_DPORT_TCP); else req->features |= BIT_ULL(NPC_DPORT_SCTP); } if (flow_type == UDP_V6_FLOW) req->features |= BIT_ULL(NPC_IPPROTO_UDP); else if (flow_type == TCP_V6_FLOW) req->features |= BIT_ULL(NPC_IPPROTO_TCP); else req->features |= BIT_ULL(NPC_IPPROTO_SCTP); break; case AH_V6_FLOW: case ESP_V6_FLOW: pkt->etype = cpu_to_be16(ETH_P_IPV6); pmask->etype = cpu_to_be16(0xFFFF); req->features |= BIT_ULL(NPC_ETYPE); if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) { memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src, sizeof(pkt->ip6src)); memcpy(&pmask->ip6src, &ah_esp_mask->ip6src, sizeof(pmask->ip6src)); req->features |= BIT_ULL(NPC_SIP_IPV6); } if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) { memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst, sizeof(pkt->ip6dst)); memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst, sizeof(pmask->ip6dst)); req->features |= BIT_ULL(NPC_DIP_IPV6); } /* NPC profile doesn't extract AH/ESP header fields */ if ((ah_esp_mask->spi & ah_esp_hdr->spi) || (ah_esp_mask->tclass & ah_esp_hdr->tclass)) return -EOPNOTSUPP; if (flow_type == AH_V6_FLOW) req->features |= BIT_ULL(NPC_IPPROTO_AH); else req->features |= BIT_ULL(NPC_IPPROTO_ESP); break; default: break; } return 0; } static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, struct npc_install_flow_req *req) { struct ethhdr *eth_mask = &fsp->m_u.ether_spec; struct ethhdr *eth_hdr = &fsp->h_u.ether_spec; struct flow_msg *pmask = &req->mask; struct flow_msg *pkt = &req->packet; u32 flow_type; int ret; flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); switch (flow_type) { /* bits not set in mask are don't care */ case ETHER_FLOW: if (!is_zero_ether_addr(eth_mask->h_source)) { ether_addr_copy(pkt->smac, eth_hdr->h_source); ether_addr_copy(pmask->smac, eth_mask->h_source); req->features |= BIT_ULL(NPC_SMAC); } if (!is_zero_ether_addr(eth_mask->h_dest)) { ether_addr_copy(pkt->dmac, eth_hdr->h_dest); ether_addr_copy(pmask->dmac, eth_mask->h_dest); req->features |= BIT_ULL(NPC_DMAC); } if (eth_hdr->h_proto) { memcpy(&pkt->etype, &eth_hdr->h_proto, sizeof(pkt->etype)); memcpy(&pmask->etype, &eth_mask->h_proto, sizeof(pmask->etype)); req->features |= BIT_ULL(NPC_ETYPE); } break; case IP_USER_FLOW: case TCP_V4_FLOW: case UDP_V4_FLOW: case SCTP_V4_FLOW: case 
AH_V4_FLOW: case ESP_V4_FLOW: ret = otx2_prepare_ipv4_flow(fsp, req, flow_type); if (ret) return ret; break; case IPV6_USER_FLOW: case TCP_V6_FLOW: case UDP_V6_FLOW: case SCTP_V6_FLOW: case AH_V6_FLOW: case ESP_V6_FLOW: ret = otx2_prepare_ipv6_flow(fsp, req, flow_type); if (ret) return ret; break; default: return -EOPNOTSUPP; } if (fsp->flow_type & FLOW_EXT) { u16 vlan_etype; if (fsp->m_ext.vlan_etype) { /* Partial masks not supported */ if (be16_to_cpu(fsp->m_ext.vlan_etype) != 0xFFFF) return -EINVAL; vlan_etype = be16_to_cpu(fsp->h_ext.vlan_etype); /* Drop rule with vlan_etype == 802.1Q * and vlan_id == 0 is not supported */ if (vlan_etype == ETH_P_8021Q && !fsp->m_ext.vlan_tci && fsp->ring_cookie == RX_CLS_FLOW_DISC) return -EINVAL; /* Only ETH_P_8021Q and ETH_P_802AD types supported */ if (vlan_etype != ETH_P_8021Q && vlan_etype != ETH_P_8021AD) return -EINVAL; memcpy(&pkt->vlan_etype, &fsp->h_ext.vlan_etype, sizeof(pkt->vlan_etype)); memcpy(&pmask->vlan_etype, &fsp->m_ext.vlan_etype, sizeof(pmask->vlan_etype)); if (vlan_etype == ETH_P_8021Q) req->features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG); else req->features |= BIT_ULL(NPC_VLAN_ETYPE_STAG); } if (fsp->m_ext.vlan_tci) { memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci, sizeof(pkt->vlan_tci)); memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci, sizeof(pmask->vlan_tci)); req->features |= BIT_ULL(NPC_OUTER_VID); } if (fsp->m_ext.data[1]) { if (flow_type == IP_USER_FLOW) { if (be32_to_cpu(fsp->h_ext.data[1]) != IPV4_FLAG_MORE) return -EINVAL; pkt->ip_flag = be32_to_cpu(fsp->h_ext.data[1]); pmask->ip_flag = be32_to_cpu(fsp->m_ext.data[1]); req->features |= BIT_ULL(NPC_IPFRAG_IPV4); } else if (fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION)) { /* Not Drop/Direct to queue but use action * in default entry */ req->op = NIX_RX_ACTION_DEFAULT; } } } if (fsp->flow_type & FLOW_MAC_EXT && !is_zero_ether_addr(fsp->m_ext.h_dest)) { ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest); ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest); req->features |= BIT_ULL(NPC_DMAC); } if (!req->features) return -EOPNOTSUPP; return 0; } static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf, struct ethtool_rx_flow_spec *fsp) { struct ethhdr *eth_mask = &fsp->m_u.ether_spec; struct ethhdr *eth_hdr = &fsp->h_u.ether_spec; u64 ring_cookie = fsp->ring_cookie; u32 flow_type; if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)) return false; flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); /* CGX/RPM block dmac filtering configured for white listing * check for action other than DROP */ if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC && !ethtool_get_flow_spec_ring_vf(ring_cookie)) { if (is_zero_ether_addr(eth_mask->h_dest) && is_valid_ether_addr(eth_hdr->h_dest)) return true; } return false; } static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow) { u64 ring_cookie = flow->flow_spec.ring_cookie; #ifdef CONFIG_DCB int vlan_prio, qidx, pfc_rule = 0; #endif struct npc_install_flow_req *req; int err, vf = 0; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox); if (!req) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } err = otx2_prepare_flow_request(&flow->flow_spec, req); if (err) { /* free the allocated msg above */ otx2_mbox_reset(&pfvf->mbox.mbox, 0); mutex_unlock(&pfvf->mbox.lock); return err; } req->entry = flow->entry; req->intf = NIX_INTF_RX; req->set_cntr = 1; req->channel = pfvf->hw.rx_chan_base; if (ring_cookie == RX_CLS_FLOW_DISC) { req->op = NIX_RX_ACTIONOP_DROP; } else { /* change 
to unicast only if action of default entry is not * requested by user */ if (flow->flow_spec.flow_type & FLOW_RSS) { req->op = NIX_RX_ACTIONOP_RSS; req->index = flow->rss_ctx_id; req->flow_key_alg = pfvf->hw.flowkey_alg_idx; } else { req->op = NIX_RX_ACTIONOP_UCAST; req->index = ethtool_get_flow_spec_ring(ring_cookie); } vf = ethtool_get_flow_spec_ring_vf(ring_cookie); if (vf > pci_num_vf(pfvf->pdev)) { mutex_unlock(&pfvf->mbox.lock); return -EINVAL; } #ifdef CONFIG_DCB /* Identify PFC rule if PFC enabled and ntuple rule is vlan */ if (!vf && (req->features & BIT_ULL(NPC_OUTER_VID)) && pfvf->pfc_en && req->op != NIX_RX_ACTIONOP_RSS) { vlan_prio = ntohs(req->packet.vlan_tci) & ntohs(req->mask.vlan_tci); /* Get the priority */ vlan_prio >>= 13; flow->rule_type |= PFC_FLOWCTRL_RULE; /* Check if PFC enabled for this priority */ if (pfvf->pfc_en & BIT(vlan_prio)) { pfc_rule = true; qidx = req->index; } } #endif } /* ethtool ring_cookie has (VF + 1) for VF */ if (vf) { req->vf = vf; flow->is_vf = true; flow->vf = vf; } /* Send message to AF */ err = otx2_sync_mbox_msg(&pfvf->mbox); #ifdef CONFIG_DCB if (!err && pfc_rule) otx2_update_bpid_in_rqctx(pfvf, vlan_prio, qidx, true); #endif mutex_unlock(&pfvf->mbox.lock); return err; } static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf, struct otx2_flow *flow) { struct otx2_flow *pf_mac; struct ethhdr *eth_hdr; pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL); if (!pf_mac) return -ENOMEM; pf_mac->entry = 0; pf_mac->rule_type |= DMAC_FILTER_RULE; pf_mac->location = pfvf->flow_cfg->max_flows; memcpy(&pf_mac->flow_spec, &flow->flow_spec, sizeof(struct ethtool_rx_flow_spec)); pf_mac->flow_spec.location = pf_mac->location; /* Copy PF mac address */ eth_hdr = &pf_mac->flow_spec.h_u.ether_spec; ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr); /* Install DMAC filter with PF mac address */ otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0); otx2_add_flow_to_list(pfvf, pf_mac); pfvf->flow_cfg->nr_flows++; set_bit(0, pfvf->flow_cfg->dmacflt_bmap); return 0; } int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct ethtool_rx_flow_spec *fsp = &nfc->fs; struct otx2_flow *flow; struct ethhdr *eth_hdr; bool new = false; int err = 0; u32 ring; if (!flow_cfg->max_flows) { netdev_err(pfvf->netdev, "Ntuple rule count is 0, allocate and retry\n"); return -EINVAL; } ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT)) return -ENOMEM; if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC) return -EINVAL; if (fsp->location >= otx2_get_maxflows(flow_cfg)) return -EINVAL; flow = otx2_find_flow(pfvf, fsp->location); if (!flow) { flow = kzalloc(sizeof(*flow), GFP_KERNEL); if (!flow) return -ENOMEM; flow->location = fsp->location; flow->entry = flow_cfg->flow_ent[flow->location]; new = true; } /* struct copy */ flow->flow_spec = *fsp; if (fsp->flow_type & FLOW_RSS) flow->rss_ctx_id = nfc->rss_context; if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) { eth_hdr = &flow->flow_spec.h_u.ether_spec; /* Sync dmac filter table with updated fields */ if (flow->rule_type & DMAC_FILTER_RULE) return otx2_dmacflt_update(pfvf, eth_hdr->h_dest, flow->entry); if (bitmap_full(flow_cfg->dmacflt_bmap, flow_cfg->dmacflt_max_flows)) { netdev_warn(pfvf->netdev, "Can't insert the rule %d as max allowed dmac filters are %d\n", flow->location + flow_cfg->dmacflt_max_flows, flow_cfg->dmacflt_max_flows); err = -EINVAL; if (new) kfree(flow); return err; } /* 
Install PF mac address to DMAC filter list */ if (!test_bit(0, flow_cfg->dmacflt_bmap)) otx2_add_flow_with_pfmac(pfvf, flow); flow->rule_type |= DMAC_FILTER_RULE; flow->entry = find_first_zero_bit(flow_cfg->dmacflt_bmap, flow_cfg->dmacflt_max_flows); fsp->location = flow_cfg->max_flows + flow->entry; flow->flow_spec.location = fsp->location; flow->location = fsp->location; set_bit(flow->entry, flow_cfg->dmacflt_bmap); otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry); } else { if (flow->location >= pfvf->flow_cfg->max_flows) { netdev_warn(pfvf->netdev, "Can't insert non dmac ntuple rule at %d, allowed range %d-0\n", flow->location, flow_cfg->max_flows - 1); err = -EINVAL; } else { err = otx2_add_flow_msg(pfvf, flow); } } if (err) { if (err == MBOX_MSG_INVALID) err = -EINVAL; if (new) kfree(flow); return err; } /* add the new flow installed to list */ if (new) { otx2_add_flow_to_list(pfvf, flow); flow_cfg->nr_flows++; } return 0; } static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all) { struct npc_delete_flow_req *req; int err; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox); if (!req) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } req->entry = entry; if (all) req->all = 1; /* Send message to AF */ err = otx2_sync_mbox_msg(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock); return err; } static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req) { struct otx2_flow *iter; struct ethhdr *eth_hdr; bool found = false; list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) { if ((iter->rule_type & DMAC_FILTER_RULE) && iter->entry == 0) { eth_hdr = &iter->flow_spec.h_u.ether_spec; if (req == DMAC_ADDR_DEL) { otx2_dmacflt_remove(pfvf, eth_hdr->h_dest, 0); clear_bit(0, pfvf->flow_cfg->dmacflt_bmap); found = true; } else { ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr); otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0); } break; } } if (found) { list_del(&iter->list); kfree(iter); pfvf->flow_cfg->nr_flows--; } } int otx2_remove_flow(struct otx2_nic *pfvf, u32 location) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct otx2_flow *flow; int err; if (location >= otx2_get_maxflows(flow_cfg)) return -EINVAL; flow = otx2_find_flow(pfvf, location); if (!flow) return -ENOENT; if (flow->rule_type & DMAC_FILTER_RULE) { struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec; /* user not allowed to remove dmac filter with interface mac */ if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest)) return -EPERM; err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest, flow->entry); clear_bit(flow->entry, flow_cfg->dmacflt_bmap); /* If all dmac filters are removed delete macfilter with * interface mac address and configure CGX/RPM block in * promiscuous mode */ if (bitmap_weight(flow_cfg->dmacflt_bmap, flow_cfg->dmacflt_max_flows) == 1) otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL); } else { #ifdef CONFIG_DCB if (flow->rule_type & PFC_FLOWCTRL_RULE) otx2_update_bpid_in_rqctx(pfvf, 0, flow->flow_spec.ring_cookie, false); #endif err = otx2_remove_flow_msg(pfvf, flow->entry, false); } if (err) return err; list_del(&flow->list); kfree(flow); flow_cfg->nr_flows--; return 0; } void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id) { struct otx2_flow *flow, *tmp; int err; list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) { if (flow->rss_ctx_id != ctx_id) continue; err = otx2_remove_flow(pfvf, flow->location); if (err) netdev_warn(pfvf->netdev, "Can't delete the rule %d associated with this rss group err:%d", 
flow->location, err); } } int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct npc_delete_flow_req *req; struct otx2_flow *iter, *tmp; int err; if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT)) return 0; if (!flow_cfg->max_flows) return 0; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox); if (!req) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } req->start = flow_cfg->flow_ent[0]; req->end = flow_cfg->flow_ent[flow_cfg->max_flows - 1]; err = otx2_sync_mbox_msg(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock); list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) { list_del(&iter->list); kfree(iter); flow_cfg->nr_flows--; } return err; } int otx2_destroy_mcam_flows(struct otx2_nic *pfvf) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct npc_mcam_free_entry_req *req; struct otx2_flow *iter, *tmp; int err; if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC)) return 0; /* remove all flows */ err = otx2_remove_flow_msg(pfvf, 0, true); if (err) return err; list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) { list_del(&iter->list); kfree(iter); flow_cfg->nr_flows--; } mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox); if (!req) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } req->all = 1; /* Send message to AF to free MCAM entries */ err = otx2_sync_mbox_msg(&pfvf->mbox); if (err) { mutex_unlock(&pfvf->mbox.lock); return err; } pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC; mutex_unlock(&pfvf->mbox.lock); return 0; } int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct npc_install_flow_req *req; int err; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox); if (!req) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset]; req->intf = NIX_INTF_RX; ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr); eth_broadcast_addr((u8 *)&req->mask.dmac); req->channel = pfvf->hw.rx_chan_base; req->op = NIX_RX_ACTION_DEFAULT; req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC); req->vtag0_valid = true; req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0; /* Send message to AF */ err = otx2_sync_mbox_msg(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock); return err; } static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct npc_delete_flow_req *req; int err; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox); if (!req) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset]; /* Send message to AF */ err = otx2_sync_mbox_msg(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock); return err; } int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable) { struct nix_vtag_config *req; struct mbox_msghdr *rsp_hdr; int err; /* Dont have enough mcam entries */ if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)) return -ENOMEM; if (enable) { err = otx2_install_rxvlan_offload_flow(pf); if (err) return err; } else { err = otx2_delete_rxvlan_offload_flow(pf); if (err) return err; } mutex_lock(&pf->mbox.lock); req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox); if (!req) { mutex_unlock(&pf->mbox.lock); return -ENOMEM; } /* config strip, capture and size */ req->vtag_size = VTAGSIZE_T4; req->cfg_type = 1; /* rx vlan cfg */ req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0; 
req->rx.strip_vtag = enable; req->rx.capture_vtag = enable; err = otx2_sync_mbox_msg(&pf->mbox); if (err) { mutex_unlock(&pf->mbox.lock); return err; } rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); if (IS_ERR(rsp_hdr)) { mutex_unlock(&pf->mbox.lock); return PTR_ERR(rsp_hdr); } mutex_unlock(&pf->mbox.lock); return rsp_hdr->rc; } void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf) { struct otx2_flow *iter; struct ethhdr *eth_hdr; list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) { if (iter->rule_type & DMAC_FILTER_RULE) { eth_hdr = &iter->flow_spec.h_u.ether_spec; otx2_dmacflt_add(pf, eth_hdr->h_dest, iter->entry); } } } void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf) { otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE); }
linux-master
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
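A minimal user-space sketch of the ethtool ntuple UAPI that otx2_add_flow() above services: it installs a TCP/IPv4 rule steering packets with destination port 8080 to RX queue 2. The interface name "eth0" and the port, queue and rule-location values are illustrative assumptions only, the device is assumed to have ntuple filtering enabled, and error handling is kept minimal.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <arpa/inet.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;                 /* insert an RX classification rule */
	nfc.fs.flow_type = TCP_V4_FLOW;                /* parsed by otx2_prepare_ipv4_flow() */
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(8080);    /* match destination port 8080 */
	nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff);  /* all mask bits set: exact match */
	nfc.fs.ring_cookie = 2;                        /* steer matches to RX queue 2 */
	nfc.fs.location = 0;                           /* rule slot (assumed free) */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXCLSRLINS");
	else
		printf("ntuple rule installed at location %u\n", nfc.fs.location);

	close(fd);
	return 0;
}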
// SPDX-License-Identifier: GPL-2.0 /* Marvell MACSEC hardware offload driver * * Copyright (C) 2022 Marvell. */ #include <crypto/skcipher.h> #include <linux/rtnetlink.h> #include <linux/bitfield.h> #include "otx2_common.h" #define MCS_TCAM0_MAC_DA_MASK GENMASK_ULL(47, 0) #define MCS_TCAM0_MAC_SA_MASK GENMASK_ULL(63, 48) #define MCS_TCAM1_MAC_SA_MASK GENMASK_ULL(31, 0) #define MCS_TCAM1_ETYPE_MASK GENMASK_ULL(47, 32) #define MCS_SA_MAP_MEM_SA_USE BIT_ULL(9) #define MCS_RX_SECY_PLCY_RW_MASK GENMASK_ULL(49, 18) #define MCS_RX_SECY_PLCY_RP BIT_ULL(17) #define MCS_RX_SECY_PLCY_AUTH_ENA BIT_ULL(16) #define MCS_RX_SECY_PLCY_CIP GENMASK_ULL(8, 5) #define MCS_RX_SECY_PLCY_VAL GENMASK_ULL(2, 1) #define MCS_RX_SECY_PLCY_ENA BIT_ULL(0) #define MCS_TX_SECY_PLCY_MTU GENMASK_ULL(43, 28) #define MCS_TX_SECY_PLCY_ST_TCI GENMASK_ULL(27, 22) #define MCS_TX_SECY_PLCY_ST_OFFSET GENMASK_ULL(21, 15) #define MCS_TX_SECY_PLCY_INS_MODE BIT_ULL(14) #define MCS_TX_SECY_PLCY_AUTH_ENA BIT_ULL(13) #define MCS_TX_SECY_PLCY_CIP GENMASK_ULL(5, 2) #define MCS_TX_SECY_PLCY_PROTECT BIT_ULL(1) #define MCS_TX_SECY_PLCY_ENA BIT_ULL(0) #define MCS_GCM_AES_128 0 #define MCS_GCM_AES_256 1 #define MCS_GCM_AES_XPN_128 2 #define MCS_GCM_AES_XPN_256 3 #define MCS_TCI_ES 0x40 /* end station */ #define MCS_TCI_SC 0x20 /* SCI present */ #define MCS_TCI_SCB 0x10 /* epon */ #define MCS_TCI_E 0x08 /* encryption */ #define MCS_TCI_C 0x04 /* changed text */ #define CN10K_MAX_HASH_LEN 16 #define CN10K_MAX_SAK_LEN 32 static int cn10k_ecb_aes_encrypt(struct otx2_nic *pfvf, u8 *sak, u16 sak_len, u8 *hash) { u8 data[CN10K_MAX_HASH_LEN] = { 0 }; struct skcipher_request *req = NULL; struct scatterlist sg_src, sg_dst; struct crypto_skcipher *tfm; DECLARE_CRYPTO_WAIT(wait); int err; tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0); if (IS_ERR(tfm)) { dev_err(pfvf->dev, "failed to allocate transform for ecb-aes\n"); return PTR_ERR(tfm); } req = skcipher_request_alloc(tfm, GFP_KERNEL); if (!req) { dev_err(pfvf->dev, "failed to allocate request for skcipher\n"); err = -ENOMEM; goto free_tfm; } err = crypto_skcipher_setkey(tfm, sak, sak_len); if (err) { dev_err(pfvf->dev, "failed to set key for skcipher\n"); goto free_req; } /* build sg list */ sg_init_one(&sg_src, data, CN10K_MAX_HASH_LEN); sg_init_one(&sg_dst, hash, CN10K_MAX_HASH_LEN); skcipher_request_set_callback(req, 0, crypto_req_done, &wait); skcipher_request_set_crypt(req, &sg_src, &sg_dst, CN10K_MAX_HASH_LEN, NULL); err = crypto_skcipher_encrypt(req); err = crypto_wait_req(err, &wait); free_req: skcipher_request_free(req); free_tfm: crypto_free_skcipher(tfm); return err; } static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg, struct macsec_secy *secy) { struct cn10k_mcs_txsc *txsc; list_for_each_entry(txsc, &cfg->txsc_list, entry) { if (txsc->sw_secy == secy) return txsc; } return NULL; } static struct cn10k_mcs_rxsc *cn10k_mcs_get_rxsc(struct cn10k_mcs_cfg *cfg, struct macsec_secy *secy, struct macsec_rx_sc *rx_sc) { struct cn10k_mcs_rxsc *rxsc; list_for_each_entry(rxsc, &cfg->rxsc_list, entry) { if (rxsc->sw_rxsc == rx_sc && rxsc->sw_secy == secy) return rxsc; } return NULL; } static const char *rsrc_name(enum mcs_rsrc_type rsrc_type) { switch (rsrc_type) { case MCS_RSRC_TYPE_FLOWID: return "FLOW"; case MCS_RSRC_TYPE_SC: return "SC"; case MCS_RSRC_TYPE_SECY: return "SECY"; case MCS_RSRC_TYPE_SA: return "SA"; default: return "Unknown"; }; return "Unknown"; } static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir, enum mcs_rsrc_type type, u16 *rsrc_id) { 
struct mbox *mbox = &pfvf->mbox; struct mcs_alloc_rsrc_req *req; struct mcs_alloc_rsrc_rsp *rsp; int ret = -ENOMEM; mutex_lock(&mbox->lock); req = otx2_mbox_alloc_msg_mcs_alloc_resources(mbox); if (!req) goto fail; req->rsrc_type = type; req->rsrc_cnt = 1; req->dir = dir; ret = otx2_sync_mbox_msg(mbox); if (ret) goto fail; rsp = (struct mcs_alloc_rsrc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); if (IS_ERR(rsp) || req->rsrc_cnt != rsp->rsrc_cnt || req->rsrc_type != rsp->rsrc_type || req->dir != rsp->dir) { ret = -EINVAL; goto fail; } switch (rsp->rsrc_type) { case MCS_RSRC_TYPE_FLOWID: *rsrc_id = rsp->flow_ids[0]; break; case MCS_RSRC_TYPE_SC: *rsrc_id = rsp->sc_ids[0]; break; case MCS_RSRC_TYPE_SECY: *rsrc_id = rsp->secy_ids[0]; break; case MCS_RSRC_TYPE_SA: *rsrc_id = rsp->sa_ids[0]; break; default: ret = -EINVAL; goto fail; } mutex_unlock(&mbox->lock); return 0; fail: dev_err(pfvf->dev, "Failed to allocate %s %s resource\n", dir == MCS_TX ? "TX" : "RX", rsrc_name(type)); mutex_unlock(&mbox->lock); return ret; } static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir, enum mcs_rsrc_type type, u16 hw_rsrc_id, bool all) { struct mcs_clear_stats *clear_req; struct mbox *mbox = &pfvf->mbox; struct mcs_free_rsrc_req *req; mutex_lock(&mbox->lock); clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox); if (!clear_req) goto fail; clear_req->id = hw_rsrc_id; clear_req->type = type; clear_req->dir = dir; req = otx2_mbox_alloc_msg_mcs_free_resources(mbox); if (!req) goto fail; req->rsrc_id = hw_rsrc_id; req->rsrc_type = type; req->dir = dir; if (all) req->all = 1; if (otx2_sync_mbox_msg(&pfvf->mbox)) goto fail; mutex_unlock(&mbox->lock); return; fail: dev_err(pfvf->dev, "Failed to free %s %s resource\n", dir == MCS_TX ? "TX" : "RX", rsrc_name(type)); mutex_unlock(&mbox->lock); } static int cn10k_mcs_alloc_txsa(struct otx2_nic *pfvf, u16 *hw_sa_id) { return cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id); } static int cn10k_mcs_alloc_rxsa(struct otx2_nic *pfvf, u16 *hw_sa_id) { return cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id); } static void cn10k_mcs_free_txsa(struct otx2_nic *pfvf, u16 hw_sa_id) { cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id, false); } static void cn10k_mcs_free_rxsa(struct otx2_nic *pfvf, u16 hw_sa_id) { cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id, false); } static int cn10k_mcs_write_rx_secy(struct otx2_nic *pfvf, struct macsec_secy *secy, u8 hw_secy_id) { struct mcs_secy_plcy_write_req *req; struct mbox *mbox = &pfvf->mbox; u64 policy; u8 cipher; int ret; mutex_lock(&mbox->lock); req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox); if (!req) { ret = -ENOMEM; goto fail; } policy = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, secy->replay_window); if (secy->replay_protect) policy |= MCS_RX_SECY_PLCY_RP; policy |= MCS_RX_SECY_PLCY_AUTH_ENA; switch (secy->key_len) { case 16: cipher = secy->xpn ? MCS_GCM_AES_XPN_128 : MCS_GCM_AES_128; break; case 32: cipher = secy->xpn ? 
MCS_GCM_AES_XPN_256 : MCS_GCM_AES_256; break; default: cipher = MCS_GCM_AES_128; dev_warn(pfvf->dev, "Unsupported key length\n"); break; } policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, cipher); policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, secy->validate_frames); policy |= MCS_RX_SECY_PLCY_ENA; req->plcy = policy; req->secy_id = hw_secy_id; req->dir = MCS_RX; ret = otx2_sync_mbox_msg(mbox); fail: mutex_unlock(&mbox->lock); return ret; } static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf, struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id) { struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc; struct macsec_secy *secy = rxsc->sw_secy; struct mcs_flowid_entry_write_req *req; struct mbox *mbox = &pfvf->mbox; u64 mac_da; int ret; mutex_lock(&mbox->lock); req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox); if (!req) { ret = -ENOMEM; goto fail; } mac_da = ether_addr_to_u64(secy->netdev->dev_addr); req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_DA_MASK, mac_da); req->mask[0] = ~0ULL; req->mask[0] = ~MCS_TCAM0_MAC_DA_MASK; req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC); req->mask[1] = ~0ULL; req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK; req->mask[2] = ~0ULL; req->mask[3] = ~0ULL; req->flow_id = rxsc->hw_flow_id; req->secy_id = hw_secy_id; req->sc_id = rxsc->hw_sc_id; req->dir = MCS_RX; if (sw_rx_sc->active) req->ena = 1; ret = otx2_sync_mbox_msg(mbox); fail: mutex_unlock(&mbox->lock); return ret; } static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf, struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id) { struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc; struct mcs_rx_sc_cam_write_req *sc_req; struct mbox *mbox = &pfvf->mbox; int ret; mutex_lock(&mbox->lock); sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox); if (!sc_req) { ret = -ENOMEM; goto fail; } sc_req->sci = (__force u64)cpu_to_be64((__force u64)sw_rx_sc->sci); sc_req->sc_id = rxsc->hw_sc_id; sc_req->secy_id = hw_secy_id; ret = otx2_sync_mbox_msg(mbox); fail: mutex_unlock(&mbox->lock); return ret; } static int cn10k_mcs_write_keys(struct otx2_nic *pfvf, struct macsec_secy *secy, struct mcs_sa_plcy_write_req *req, u8 *sak, u8 *salt, ssci_t ssci) { u8 hash_rev[CN10K_MAX_HASH_LEN]; u8 sak_rev[CN10K_MAX_SAK_LEN]; u8 salt_rev[MACSEC_SALT_LEN]; u8 hash[CN10K_MAX_HASH_LEN]; u32 ssci_63_32; int err, i; err = cn10k_ecb_aes_encrypt(pfvf, sak, secy->key_len, hash); if (err) { dev_err(pfvf->dev, "Generating hash using ECB(AES) failed\n"); return err; } for (i = 0; i < secy->key_len; i++) sak_rev[i] = sak[secy->key_len - 1 - i]; for (i = 0; i < CN10K_MAX_HASH_LEN; i++) hash_rev[i] = hash[CN10K_MAX_HASH_LEN - 1 - i]; for (i = 0; i < MACSEC_SALT_LEN; i++) salt_rev[i] = salt[MACSEC_SALT_LEN - 1 - i]; ssci_63_32 = (__force u32)cpu_to_be32((__force u32)ssci); memcpy(&req->plcy[0][0], sak_rev, secy->key_len); memcpy(&req->plcy[0][4], hash_rev, CN10K_MAX_HASH_LEN); memcpy(&req->plcy[0][6], salt_rev, MACSEC_SALT_LEN); req->plcy[0][7] |= (u64)ssci_63_32 << 32; return 0; } static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf, struct macsec_secy *secy, struct cn10k_mcs_rxsc *rxsc, u8 assoc_num, bool sa_in_use) { struct mcs_sa_plcy_write_req *plcy_req; u8 *sak = rxsc->sa_key[assoc_num]; u8 *salt = rxsc->salt[assoc_num]; struct mcs_rx_sc_sa_map *map_req; struct mbox *mbox = &pfvf->mbox; int ret; mutex_lock(&mbox->lock); plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox); if (!plcy_req) { ret = -ENOMEM; goto fail; } map_req = otx2_mbox_alloc_msg_mcs_rx_sc_sa_map_write(mbox); if (!map_req) { otx2_mbox_reset(&mbox->mbox, 0); ret = -ENOMEM; goto fail; } ret = 
cn10k_mcs_write_keys(pfvf, secy, plcy_req, sak, salt, rxsc->ssci[assoc_num]); if (ret) goto fail; plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num]; plcy_req->sa_cnt = 1; plcy_req->dir = MCS_RX; map_req->sa_index = rxsc->hw_sa_id[assoc_num]; map_req->sa_in_use = sa_in_use; map_req->sc_id = rxsc->hw_sc_id; map_req->an = assoc_num; /* Send two messages together */ ret = otx2_sync_mbox_msg(mbox); fail: mutex_unlock(&mbox->lock); return ret; } static int cn10k_mcs_write_rx_sa_pn(struct otx2_nic *pfvf, struct cn10k_mcs_rxsc *rxsc, u8 assoc_num, u64 next_pn) { struct mcs_pn_table_write_req *req; struct mbox *mbox = &pfvf->mbox; int ret; mutex_lock(&mbox->lock); req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox); if (!req) { ret = -ENOMEM; goto fail; } req->pn_id = rxsc->hw_sa_id[assoc_num]; req->next_pn = next_pn; req->dir = MCS_RX; ret = otx2_sync_mbox_msg(mbox); fail: mutex_unlock(&mbox->lock); return ret; } static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf, struct macsec_secy *secy, struct cn10k_mcs_txsc *txsc) { struct mcs_secy_plcy_write_req *req; struct mbox *mbox = &pfvf->mbox; struct macsec_tx_sc *sw_tx_sc; u8 sectag_tci = 0; u8 tag_offset; u64 policy; u8 cipher; int ret; /* Insert SecTag after 12 bytes (DA+SA) or 16 bytes * if VLAN tag needs to be sent in clear text. */ tag_offset = txsc->vlan_dev ? 16 : 12; sw_tx_sc = &secy->tx_sc; mutex_lock(&mbox->lock); req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox); if (!req) { ret = -ENOMEM; goto fail; } if (sw_tx_sc->send_sci) { sectag_tci |= MCS_TCI_SC; } else { if (sw_tx_sc->end_station) sectag_tci |= MCS_TCI_ES; if (sw_tx_sc->scb) sectag_tci |= MCS_TCI_SCB; } if (sw_tx_sc->encrypt) sectag_tci |= (MCS_TCI_E | MCS_TCI_C); policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu); /* Write SecTag excluding AN bits(1..0) */ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2); policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset); policy |= MCS_TX_SECY_PLCY_INS_MODE; policy |= MCS_TX_SECY_PLCY_AUTH_ENA; switch (secy->key_len) { case 16: cipher = secy->xpn ? MCS_GCM_AES_XPN_128 : MCS_GCM_AES_128; break; case 32: cipher = secy->xpn ? MCS_GCM_AES_XPN_256 : MCS_GCM_AES_256; break; default: cipher = MCS_GCM_AES_128; dev_warn(pfvf->dev, "Unsupported key length\n"); break; } policy |= FIELD_PREP(MCS_TX_SECY_PLCY_CIP, cipher); if (secy->protect_frames) policy |= MCS_TX_SECY_PLCY_PROTECT; /* If the encodingsa does not exist/active and protect is * not set then frames can be sent out as it is. Hence enable * the policy irrespective of secy operational when !protect. 
*/ if (!secy->protect_frames || secy->operational) policy |= MCS_TX_SECY_PLCY_ENA; req->plcy = policy; req->secy_id = txsc->hw_secy_id_tx; req->dir = MCS_TX; ret = otx2_sync_mbox_msg(mbox); fail: mutex_unlock(&mbox->lock); return ret; } static int cn10k_mcs_write_tx_flowid(struct otx2_nic *pfvf, struct macsec_secy *secy, struct cn10k_mcs_txsc *txsc) { struct mcs_flowid_entry_write_req *req; struct mbox *mbox = &pfvf->mbox; u64 mac_sa; int ret; mutex_lock(&mbox->lock); req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox); if (!req) { ret = -ENOMEM; goto fail; } mac_sa = ether_addr_to_u64(secy->netdev->dev_addr); req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa); req->data[1] = FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16); req->mask[0] = ~0ULL; req->mask[0] &= ~MCS_TCAM0_MAC_SA_MASK; req->mask[1] = ~0ULL; req->mask[1] &= ~MCS_TCAM1_MAC_SA_MASK; req->mask[2] = ~0ULL; req->mask[3] = ~0ULL; req->flow_id = txsc->hw_flow_id; req->secy_id = txsc->hw_secy_id_tx; req->sc_id = txsc->hw_sc_id; req->sci = (__force u64)cpu_to_be64((__force u64)secy->sci); req->dir = MCS_TX; /* This can be enabled since stack xmits packets only when interface is up */ req->ena = 1; ret = otx2_sync_mbox_msg(mbox); fail: mutex_unlock(&mbox->lock); return ret; } static int cn10k_mcs_link_tx_sa2sc(struct otx2_nic *pfvf, struct macsec_secy *secy, struct cn10k_mcs_txsc *txsc, u8 sa_num, bool sa_active) { struct mcs_tx_sc_sa_map *map_req; struct mbox *mbox = &pfvf->mbox; int ret; /* Link the encoding_sa only to SC out of all SAs */ if (txsc->encoding_sa != sa_num) return 0; mutex_lock(&mbox->lock); map_req = otx2_mbox_alloc_msg_mcs_tx_sc_sa_map_write(mbox); if (!map_req) { otx2_mbox_reset(&mbox->mbox, 0); ret = -ENOMEM; goto fail; } map_req->sa_index0 = txsc->hw_sa_id[sa_num]; map_req->sa_index0_vld = sa_active; map_req->sectag_sci = (__force u64)cpu_to_be64((__force u64)secy->sci); map_req->sc_id = txsc->hw_sc_id; ret = otx2_sync_mbox_msg(mbox); fail: mutex_unlock(&mbox->lock); return ret; } static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf, struct macsec_secy *secy, struct cn10k_mcs_txsc *txsc, u8 assoc_num) { struct mcs_sa_plcy_write_req *plcy_req; u8 *sak = txsc->sa_key[assoc_num]; u8 *salt = txsc->salt[assoc_num]; struct mbox *mbox = &pfvf->mbox; int ret; mutex_lock(&mbox->lock); plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox); if (!plcy_req) { ret = -ENOMEM; goto fail; } ret = cn10k_mcs_write_keys(pfvf, secy, plcy_req, sak, salt, txsc->ssci[assoc_num]); if (ret) goto fail; plcy_req->plcy[0][8] = assoc_num; plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num]; plcy_req->sa_cnt = 1; plcy_req->dir = MCS_TX; ret = otx2_sync_mbox_msg(mbox); fail: mutex_unlock(&mbox->lock); return ret; } static int cn10k_write_tx_sa_pn(struct otx2_nic *pfvf, struct cn10k_mcs_txsc *txsc, u8 assoc_num, u64 next_pn) { struct mcs_pn_table_write_req *req; struct mbox *mbox = &pfvf->mbox; int ret; mutex_lock(&mbox->lock); req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox); if (!req) { ret = -ENOMEM; goto fail; } req->pn_id = txsc->hw_sa_id[assoc_num]; req->next_pn = next_pn; req->dir = MCS_TX; ret = otx2_sync_mbox_msg(mbox); fail: mutex_unlock(&mbox->lock); return ret; } static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id, bool enable, enum mcs_direction dir) { struct mcs_flowid_ena_dis_entry *req; struct mbox *mbox = &pfvf->mbox; int ret; mutex_lock(&mbox->lock); req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox); if (!req) { ret = -ENOMEM; goto fail; } req->flow_id = hw_flow_id; req->ena = enable; 
req->dir = dir; ret = otx2_sync_mbox_msg(mbox); fail: mutex_unlock(&mbox->lock); return ret; } static int cn10k_mcs_sa_stats(struct otx2_nic *pfvf, u8 hw_sa_id, struct mcs_sa_stats *rsp_p, enum mcs_direction dir, bool clear) { struct mcs_clear_stats *clear_req; struct mbox *mbox = &pfvf->mbox; struct mcs_stats_req *req; struct mcs_sa_stats *rsp; int ret; mutex_lock(&mbox->lock); req = otx2_mbox_alloc_msg_mcs_get_sa_stats(mbox); if (!req) { ret = -ENOMEM; goto fail; } req->id = hw_sa_id; req->dir = dir; if (!clear) goto send_msg; clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox); if (!clear_req) { ret = -ENOMEM; goto fail; } clear_req->id = hw_sa_id; clear_req->dir = dir; clear_req->type = MCS_RSRC_TYPE_SA; send_msg: ret = otx2_sync_mbox_msg(mbox); if (ret) goto fail; rsp = (struct mcs_sa_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); if (IS_ERR(rsp)) { ret = PTR_ERR(rsp); goto fail; } memcpy(rsp_p, rsp, sizeof(*rsp_p)); mutex_unlock(&mbox->lock); return 0; fail: mutex_unlock(&mbox->lock); return ret; } static int cn10k_mcs_sc_stats(struct otx2_nic *pfvf, u8 hw_sc_id, struct mcs_sc_stats *rsp_p, enum mcs_direction dir, bool clear) { struct mcs_clear_stats *clear_req; struct mbox *mbox = &pfvf->mbox; struct mcs_stats_req *req; struct mcs_sc_stats *rsp; int ret; mutex_lock(&mbox->lock); req = otx2_mbox_alloc_msg_mcs_get_sc_stats(mbox); if (!req) { ret = -ENOMEM; goto fail; } req->id = hw_sc_id; req->dir = dir; if (!clear) goto send_msg; clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox); if (!clear_req) { ret = -ENOMEM; goto fail; } clear_req->id = hw_sc_id; clear_req->dir = dir; clear_req->type = MCS_RSRC_TYPE_SC; send_msg: ret = otx2_sync_mbox_msg(mbox); if (ret) goto fail; rsp = (struct mcs_sc_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); if (IS_ERR(rsp)) { ret = PTR_ERR(rsp); goto fail; } memcpy(rsp_p, rsp, sizeof(*rsp_p)); mutex_unlock(&mbox->lock); return 0; fail: mutex_unlock(&mbox->lock); return ret; } static int cn10k_mcs_secy_stats(struct otx2_nic *pfvf, u8 hw_secy_id, struct mcs_secy_stats *rsp_p, enum mcs_direction dir, bool clear) { struct mcs_clear_stats *clear_req; struct mbox *mbox = &pfvf->mbox; struct mcs_secy_stats *rsp; struct mcs_stats_req *req; int ret; mutex_lock(&mbox->lock); req = otx2_mbox_alloc_msg_mcs_get_secy_stats(mbox); if (!req) { ret = -ENOMEM; goto fail; } req->id = hw_secy_id; req->dir = dir; if (!clear) goto send_msg; clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox); if (!clear_req) { ret = -ENOMEM; goto fail; } clear_req->id = hw_secy_id; clear_req->dir = dir; clear_req->type = MCS_RSRC_TYPE_SECY; send_msg: ret = otx2_sync_mbox_msg(mbox); if (ret) goto fail; rsp = (struct mcs_secy_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); if (IS_ERR(rsp)) { ret = PTR_ERR(rsp); goto fail; } memcpy(rsp_p, rsp, sizeof(*rsp_p)); mutex_unlock(&mbox->lock); return 0; fail: mutex_unlock(&mbox->lock); return ret; } static struct cn10k_mcs_txsc *cn10k_mcs_create_txsc(struct otx2_nic *pfvf) { struct cn10k_mcs_txsc *txsc; int ret; txsc = kzalloc(sizeof(*txsc), GFP_KERNEL); if (!txsc) return ERR_PTR(-ENOMEM); ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID, &txsc->hw_flow_id); if (ret) goto fail; /* For a SecY, one TX secy and one RX secy HW resources are needed */ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, &txsc->hw_secy_id_tx); if (ret) goto free_flowid; ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, &txsc->hw_secy_id_rx); if (ret) goto free_tx_secy; ret = cn10k_mcs_alloc_rsrc(pfvf, 
MCS_TX, MCS_RSRC_TYPE_SC, &txsc->hw_sc_id); if (ret) goto free_rx_secy; return txsc; free_rx_secy: cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, txsc->hw_secy_id_rx, false); free_tx_secy: cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, txsc->hw_secy_id_tx, false); free_flowid: cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID, txsc->hw_flow_id, false); fail: kfree(txsc); return ERR_PTR(ret); } /* Free Tx SC and its SAs(if any) resources to AF */ static void cn10k_mcs_delete_txsc(struct otx2_nic *pfvf, struct cn10k_mcs_txsc *txsc) { u8 sa_bmap = txsc->sa_bmap; u8 sa_num = 0; while (sa_bmap) { if (sa_bmap & 1) { cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy, txsc, sa_num); cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]); } sa_num++; sa_bmap >>= 1; } cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC, txsc->hw_sc_id, false); cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, txsc->hw_secy_id_rx, false); cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, txsc->hw_secy_id_tx, false); cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID, txsc->hw_flow_id, false); } static struct cn10k_mcs_rxsc *cn10k_mcs_create_rxsc(struct otx2_nic *pfvf) { struct cn10k_mcs_rxsc *rxsc; int ret; rxsc = kzalloc(sizeof(*rxsc), GFP_KERNEL); if (!rxsc) return ERR_PTR(-ENOMEM); ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID, &rxsc->hw_flow_id); if (ret) goto fail; ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC, &rxsc->hw_sc_id); if (ret) goto free_flowid; return rxsc; free_flowid: cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID, rxsc->hw_flow_id, false); fail: kfree(rxsc); return ERR_PTR(ret); } /* Free Rx SC and its SAs(if any) resources to AF */ static void cn10k_mcs_delete_rxsc(struct otx2_nic *pfvf, struct cn10k_mcs_rxsc *rxsc) { u8 sa_bmap = rxsc->sa_bmap; u8 sa_num = 0; while (sa_bmap) { if (sa_bmap & 1) { cn10k_mcs_write_rx_sa_plcy(pfvf, rxsc->sw_secy, rxsc, sa_num, false); cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]); } sa_num++; sa_bmap >>= 1; } cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC, rxsc->hw_sc_id, false); cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID, rxsc->hw_flow_id, false); } static int cn10k_mcs_secy_tx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy, struct cn10k_mcs_txsc *txsc, struct macsec_tx_sa *sw_tx_sa, u8 sa_num) { if (sw_tx_sa) { cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num); cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, sw_tx_sa->next_pn); cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, sw_tx_sa->active); } cn10k_mcs_write_tx_secy(pfvf, secy, txsc); cn10k_mcs_write_tx_flowid(pfvf, secy, txsc); /* When updating secy, change RX secy also */ cn10k_mcs_write_rx_secy(pfvf, secy, txsc->hw_secy_id_rx); return 0; } static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy, u8 hw_secy_id) { struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; struct cn10k_mcs_rxsc *mcs_rx_sc; struct macsec_rx_sc *sw_rx_sc; struct macsec_rx_sa *sw_rx_sa; u8 sa_num; for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active; sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) { mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc); if (unlikely(!mcs_rx_sc)) continue; for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) { sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]); if (!sw_rx_sa) continue; cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc, sa_num, sw_rx_sa->active); cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num, sw_rx_sa->next_pn); } cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, 
hw_secy_id); cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id); } return 0; } static int cn10k_mcs_disable_rxscs(struct otx2_nic *pfvf, struct macsec_secy *secy, bool delete) { struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; struct cn10k_mcs_rxsc *mcs_rx_sc; struct macsec_rx_sc *sw_rx_sc; int ret; for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active; sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) { mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc); if (unlikely(!mcs_rx_sc)) continue; ret = cn10k_mcs_ena_dis_flowid(pfvf, mcs_rx_sc->hw_flow_id, false, MCS_RX); if (ret) dev_err(pfvf->dev, "Failed to disable TCAM for SC %d\n", mcs_rx_sc->hw_sc_id); if (delete) { cn10k_mcs_delete_rxsc(pfvf, mcs_rx_sc); list_del(&mcs_rx_sc->entry); kfree(mcs_rx_sc); } } return 0; } static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy, struct cn10k_mcs_txsc *txsc) { struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; struct mcs_secy_stats rx_rsp = { 0 }; struct mcs_sc_stats sc_rsp = { 0 }; struct cn10k_mcs_rxsc *rxsc; /* Because of shared counters for some stats in the hardware, when * updating secy policy take a snapshot of current stats and reset them. * Below are the effected stats because of shared counters. */ /* Check if sync is really needed */ if (secy->validate_frames == txsc->last_validate_frames && secy->replay_protect == txsc->last_replay_protect) return; cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true); txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt; txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt; txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt; if (txsc->last_validate_frames == MACSEC_VALIDATE_STRICT) txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt; else txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt; list_for_each_entry(rxsc, &cfg->rxsc_list, entry) { cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &sc_rsp, MCS_RX, true); rxsc->stats.InOctetsValidated += sc_rsp.octet_validate_cnt; rxsc->stats.InOctetsDecrypted += sc_rsp.octet_decrypt_cnt; rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt; rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt; if (txsc->last_replay_protect) rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt; else rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt; if (txsc->last_validate_frames == MACSEC_VALIDATE_DISABLED) rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt; else rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt; } txsc->last_validate_frames = secy->validate_frames; txsc->last_replay_protect = secy->replay_protect; } static int cn10k_mdo_open(struct macsec_context *ctx) { struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev); struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; struct macsec_secy *secy = ctx->secy; struct macsec_tx_sa *sw_tx_sa; struct cn10k_mcs_txsc *txsc; u8 sa_num; int err; txsc = cn10k_mcs_get_txsc(cfg, ctx->secy); if (!txsc) return -ENOENT; sa_num = txsc->encoding_sa; sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]); err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num); if (err) return err; return cn10k_mcs_secy_rx_cfg(pfvf, secy, txsc->hw_secy_id_rx); } static int cn10k_mdo_stop(struct macsec_context *ctx) { struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev); struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; struct cn10k_mcs_txsc *txsc; int err; txsc = cn10k_mcs_get_txsc(cfg, ctx->secy); if (!txsc) return -ENOENT; err = cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX); if (err) return err; return cn10k_mcs_disable_rxscs(pfvf, ctx->secy, 
false); } static int cn10k_mdo_add_secy(struct macsec_context *ctx) { struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev); struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; struct macsec_secy *secy = ctx->secy; struct cn10k_mcs_txsc *txsc; if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) return -EOPNOTSUPP; txsc = cn10k_mcs_create_txsc(pfvf); if (IS_ERR(txsc)) return -ENOSPC; txsc->sw_secy = secy; txsc->encoding_sa = secy->tx_sc.encoding_sa; txsc->last_validate_frames = secy->validate_frames; txsc->last_replay_protect = secy->replay_protect; txsc->vlan_dev = is_vlan_dev(ctx->netdev); list_add(&txsc->entry, &cfg->txsc_list); if (netif_running(secy->netdev)) return cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0); return 0; } static int cn10k_mdo_upd_secy(struct macsec_context *ctx) { struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev); struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; struct macsec_secy *secy = ctx->secy; struct macsec_tx_sa *sw_tx_sa; struct cn10k_mcs_txsc *txsc; bool active; u8 sa_num; int err; txsc = cn10k_mcs_get_txsc(cfg, secy); if (!txsc) return -ENOENT; /* Encoding SA got changed */ if (txsc->encoding_sa != secy->tx_sc.encoding_sa) { txsc->encoding_sa = secy->tx_sc.encoding_sa; sa_num = txsc->encoding_sa; sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]); active = sw_tx_sa ? sw_tx_sa->active : false; cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, active); } if (netif_running(secy->netdev)) { cn10k_mcs_sync_stats(pfvf, secy, txsc); err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0); if (err) return err; } return 0; } static int cn10k_mdo_del_secy(struct macsec_context *ctx) { struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev); struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; struct cn10k_mcs_txsc *txsc; txsc = cn10k_mcs_get_txsc(cfg, ctx->secy); if (!txsc) return -ENOENT; cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX); cn10k_mcs_disable_rxscs(pfvf, ctx->secy, true); cn10k_mcs_delete_txsc(pfvf, txsc); list_del(&txsc->entry); kfree(txsc); return 0; } static int cn10k_mdo_add_txsa(struct macsec_context *ctx) { struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev); struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa; struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; struct macsec_secy *secy = ctx->secy; u8 sa_num = ctx->sa.assoc_num; struct cn10k_mcs_txsc *txsc; int err; txsc = cn10k_mcs_get_txsc(cfg, secy); if (!txsc) return -ENOENT; if (sa_num >= CN10K_MCS_SA_PER_SC) return -EOPNOTSUPP; if (cn10k_mcs_alloc_txsa(pfvf, &txsc->hw_sa_id[sa_num])) return -ENOSPC; memcpy(&txsc->sa_key[sa_num], ctx->sa.key, secy->key_len); memcpy(&txsc->salt[sa_num], sw_tx_sa->key.salt.bytes, MACSEC_SALT_LEN); txsc->ssci[sa_num] = sw_tx_sa->ssci; txsc->sa_bmap |= 1 << sa_num; if (netif_running(secy->netdev)) { err = cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num); if (err) return err; err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, sw_tx_sa->next_pn); if (err) return err; err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, sw_tx_sa->active); if (err) return err; } return 0; } static int cn10k_mdo_upd_txsa(struct macsec_context *ctx) { struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev); struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa; struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; struct macsec_secy *secy = ctx->secy; u8 sa_num = ctx->sa.assoc_num; struct cn10k_mcs_txsc *txsc; int err; txsc = cn10k_mcs_get_txsc(cfg, secy); if (!txsc) return -ENOENT; if (sa_num >= CN10K_MCS_SA_PER_SC) return -EOPNOTSUPP; if (netif_running(secy->netdev)) { /* Keys cannot be changed 
after creation */ err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num, sw_tx_sa->next_pn); if (err) return err; err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, sw_tx_sa->active); if (err) return err; } return 0; } static int cn10k_mdo_del_txsa(struct macsec_context *ctx) { struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev); struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; u8 sa_num = ctx->sa.assoc_num; struct cn10k_mcs_txsc *txsc; txsc = cn10k_mcs_get_txsc(cfg, ctx->secy); if (!txsc) return -ENOENT; if (sa_num >= CN10K_MCS_SA_PER_SC) return -EOPNOTSUPP; cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]); txsc->sa_bmap &= ~(1 << sa_num); return 0; } static int cn10k_mdo_add_rxsc(struct macsec_context *ctx) { struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev); struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; struct macsec_secy *secy = ctx->secy; struct cn10k_mcs_rxsc *rxsc; struct cn10k_mcs_txsc *txsc; int err; txsc = cn10k_mcs_get_txsc(cfg, secy); if (!txsc) return -ENOENT; rxsc = cn10k_mcs_create_rxsc(pfvf); if (IS_ERR(rxsc)) return -ENOSPC; rxsc->sw_secy = ctx->secy; rxsc->sw_rxsc = ctx->rx_sc; list_add(&rxsc->entry, &cfg->rxsc_list); if (netif_running(secy->netdev)) { err = cn10k_mcs_write_rx_flowid(pfvf, rxsc, txsc->hw_secy_id_rx); if (err) return err; err = cn10k_mcs_write_sc_cam(pfvf, rxsc, txsc->hw_secy_id_rx); if (err) return err; } return 0; } static int cn10k_mdo_upd_rxsc(struct macsec_context *ctx) { struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev); struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; struct macsec_secy *secy = ctx->secy; bool enable = ctx->rx_sc->active; struct cn10k_mcs_rxsc *rxsc; rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc); if (!rxsc) return -ENOENT; if (netif_running(secy->netdev)) return cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, enable, MCS_RX); return 0; } static int cn10k_mdo_del_rxsc(struct macsec_context *ctx) { struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev); struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; struct cn10k_mcs_rxsc *rxsc; rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, ctx->rx_sc); if (!rxsc) return -ENOENT; cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, false, MCS_RX); cn10k_mcs_delete_rxsc(pfvf, rxsc); list_del(&rxsc->entry); kfree(rxsc); return 0; } static int cn10k_mdo_add_rxsa(struct macsec_context *ctx) { struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev); struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc; struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa; struct macsec_secy *secy = ctx->secy; bool sa_in_use = rx_sa->active; u8 sa_num = ctx->sa.assoc_num; struct cn10k_mcs_rxsc *rxsc; int err; rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc); if (!rxsc) return -ENOENT; if (sa_num >= CN10K_MCS_SA_PER_SC) return -EOPNOTSUPP; if (cn10k_mcs_alloc_rxsa(pfvf, &rxsc->hw_sa_id[sa_num])) return -ENOSPC; memcpy(&rxsc->sa_key[sa_num], ctx->sa.key, ctx->secy->key_len); memcpy(&rxsc->salt[sa_num], rx_sa->key.salt.bytes, MACSEC_SALT_LEN); rxsc->ssci[sa_num] = rx_sa->ssci; rxsc->sa_bmap |= 1 << sa_num; if (netif_running(secy->netdev)) { err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use); if (err) return err; err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, rx_sa->next_pn); if (err) return err; } return 0; } static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx) { struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev); struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc; struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa; struct 
macsec_secy *secy = ctx->secy; bool sa_in_use = rx_sa->active; u8 sa_num = ctx->sa.assoc_num; struct cn10k_mcs_rxsc *rxsc; int err; rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc); if (!rxsc) return -ENOENT; if (sa_num >= CN10K_MCS_SA_PER_SC) return -EOPNOTSUPP; if (netif_running(secy->netdev)) { err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use); if (err) return err; err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, rx_sa->next_pn); if (err) return err; } return 0; } static int cn10k_mdo_del_rxsa(struct macsec_context *ctx) { struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev); struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc; struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; u8 sa_num = ctx->sa.assoc_num; struct cn10k_mcs_rxsc *rxsc; rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc); if (!rxsc) return -ENOENT; if (sa_num >= CN10K_MCS_SA_PER_SC) return -EOPNOTSUPP; cn10k_mcs_write_rx_sa_plcy(pfvf, ctx->secy, rxsc, sa_num, false); cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]); rxsc->sa_bmap &= ~(1 << sa_num); return 0; } static int cn10k_mdo_get_dev_stats(struct macsec_context *ctx) { struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev); struct mcs_secy_stats tx_rsp = { 0 }, rx_rsp = { 0 }; struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; struct macsec_secy *secy = ctx->secy; struct cn10k_mcs_txsc *txsc; txsc = cn10k_mcs_get_txsc(cfg, ctx->secy); if (!txsc) return -ENOENT; cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_tx, &tx_rsp, MCS_TX, false); ctx->stats.dev_stats->OutPktsUntagged = tx_rsp.pkt_untagged_cnt; ctx->stats.dev_stats->OutPktsTooLong = tx_rsp.pkt_toolong_cnt; cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true); txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt; txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt; txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt; if (secy->validate_frames == MACSEC_VALIDATE_STRICT) txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt; else txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt; txsc->stats.InPktsOverrun = 0; ctx->stats.dev_stats->InPktsNoTag = txsc->stats.InPktsNoTag; ctx->stats.dev_stats->InPktsUntagged = txsc->stats.InPktsUntagged; ctx->stats.dev_stats->InPktsBadTag = txsc->stats.InPktsBadTag; ctx->stats.dev_stats->InPktsUnknownSCI = txsc->stats.InPktsUnknownSCI; ctx->stats.dev_stats->InPktsNoSCI = txsc->stats.InPktsNoSCI; ctx->stats.dev_stats->InPktsOverrun = txsc->stats.InPktsOverrun; return 0; } static int cn10k_mdo_get_tx_sc_stats(struct macsec_context *ctx) { struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev); struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; struct mcs_sc_stats rsp = { 0 }; struct cn10k_mcs_txsc *txsc; txsc = cn10k_mcs_get_txsc(cfg, ctx->secy); if (!txsc) return -ENOENT; cn10k_mcs_sc_stats(pfvf, txsc->hw_sc_id, &rsp, MCS_TX, false); ctx->stats.tx_sc_stats->OutPktsProtected = rsp.pkt_protected_cnt; ctx->stats.tx_sc_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt; ctx->stats.tx_sc_stats->OutOctetsProtected = rsp.octet_protected_cnt; ctx->stats.tx_sc_stats->OutOctetsEncrypted = rsp.octet_encrypt_cnt; return 0; } static int cn10k_mdo_get_tx_sa_stats(struct macsec_context *ctx) { struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev); struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; struct mcs_sa_stats rsp = { 0 }; u8 sa_num = ctx->sa.assoc_num; struct cn10k_mcs_txsc *txsc; txsc = cn10k_mcs_get_txsc(cfg, ctx->secy); if (!txsc) return -ENOENT; if (sa_num >= CN10K_MCS_SA_PER_SC) return -EOPNOTSUPP; cn10k_mcs_sa_stats(pfvf, txsc->hw_sa_id[sa_num], &rsp, MCS_TX, false); 
ctx->stats.tx_sa_stats->OutPktsProtected = rsp.pkt_protected_cnt; ctx->stats.tx_sa_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt; return 0; } static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx) { struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev); struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; struct macsec_secy *secy = ctx->secy; struct mcs_sc_stats rsp = { 0 }; struct cn10k_mcs_rxsc *rxsc; rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc); if (!rxsc) return -ENOENT; cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &rsp, MCS_RX, true); rxsc->stats.InOctetsValidated += rsp.octet_validate_cnt; rxsc->stats.InOctetsDecrypted += rsp.octet_decrypt_cnt; rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt; rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt; if (secy->replay_protect) rxsc->stats.InPktsLate += rsp.pkt_late_cnt; else rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt; if (secy->validate_frames == MACSEC_VALIDATE_DISABLED) rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt; else rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt; ctx->stats.rx_sc_stats->InOctetsValidated = rxsc->stats.InOctetsValidated; ctx->stats.rx_sc_stats->InOctetsDecrypted = rxsc->stats.InOctetsDecrypted; ctx->stats.rx_sc_stats->InPktsInvalid = rxsc->stats.InPktsInvalid; ctx->stats.rx_sc_stats->InPktsNotValid = rxsc->stats.InPktsNotValid; ctx->stats.rx_sc_stats->InPktsLate = rxsc->stats.InPktsLate; ctx->stats.rx_sc_stats->InPktsDelayed = rxsc->stats.InPktsDelayed; ctx->stats.rx_sc_stats->InPktsUnchecked = rxsc->stats.InPktsUnchecked; ctx->stats.rx_sc_stats->InPktsOK = rxsc->stats.InPktsOK; return 0; } static int cn10k_mdo_get_rx_sa_stats(struct macsec_context *ctx) { struct otx2_nic *pfvf = macsec_netdev_priv(ctx->netdev); struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc; struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; struct mcs_sa_stats rsp = { 0 }; u8 sa_num = ctx->sa.assoc_num; struct cn10k_mcs_rxsc *rxsc; rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc); if (!rxsc) return -ENOENT; if (sa_num >= CN10K_MCS_SA_PER_SC) return -EOPNOTSUPP; cn10k_mcs_sa_stats(pfvf, rxsc->hw_sa_id[sa_num], &rsp, MCS_RX, false); ctx->stats.rx_sa_stats->InPktsOK = rsp.pkt_ok_cnt; ctx->stats.rx_sa_stats->InPktsInvalid = rsp.pkt_invalid_cnt; ctx->stats.rx_sa_stats->InPktsNotValid = rsp.pkt_notvalid_cnt; ctx->stats.rx_sa_stats->InPktsNotUsingSA = rsp.pkt_nosaerror_cnt; ctx->stats.rx_sa_stats->InPktsUnusedSA = rsp.pkt_nosa_cnt; return 0; } static const struct macsec_ops cn10k_mcs_ops = { .mdo_dev_open = cn10k_mdo_open, .mdo_dev_stop = cn10k_mdo_stop, .mdo_add_secy = cn10k_mdo_add_secy, .mdo_upd_secy = cn10k_mdo_upd_secy, .mdo_del_secy = cn10k_mdo_del_secy, .mdo_add_rxsc = cn10k_mdo_add_rxsc, .mdo_upd_rxsc = cn10k_mdo_upd_rxsc, .mdo_del_rxsc = cn10k_mdo_del_rxsc, .mdo_add_rxsa = cn10k_mdo_add_rxsa, .mdo_upd_rxsa = cn10k_mdo_upd_rxsa, .mdo_del_rxsa = cn10k_mdo_del_rxsa, .mdo_add_txsa = cn10k_mdo_add_txsa, .mdo_upd_txsa = cn10k_mdo_upd_txsa, .mdo_del_txsa = cn10k_mdo_del_txsa, .mdo_get_dev_stats = cn10k_mdo_get_dev_stats, .mdo_get_tx_sc_stats = cn10k_mdo_get_tx_sc_stats, .mdo_get_tx_sa_stats = cn10k_mdo_get_tx_sa_stats, .mdo_get_rx_sc_stats = cn10k_mdo_get_rx_sc_stats, .mdo_get_rx_sa_stats = cn10k_mdo_get_rx_sa_stats, }; void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event) { struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg; struct macsec_tx_sa *sw_tx_sa = NULL; struct macsec_secy *secy = NULL; struct cn10k_mcs_txsc *txsc; u8 an; if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag)) return; if (!(event->intr_mask & 
MCS_CPM_TX_PACKET_XPN_EQ0_INT)) return; /* Find the SecY to which the expired hardware SA is mapped */ list_for_each_entry(txsc, &cfg->txsc_list, entry) { for (an = 0; an < CN10K_MCS_SA_PER_SC; an++) if (txsc->hw_sa_id[an] == event->sa_id) { secy = txsc->sw_secy; sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]); } } if (secy && sw_tx_sa) macsec_pn_wrapped(secy, sw_tx_sa); } int cn10k_mcs_init(struct otx2_nic *pfvf) { struct mbox *mbox = &pfvf->mbox; struct cn10k_mcs_cfg *cfg; struct mcs_intr_cfg *req; if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag)) return 0; cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); if (!cfg) return -ENOMEM; INIT_LIST_HEAD(&cfg->txsc_list); INIT_LIST_HEAD(&cfg->rxsc_list); pfvf->macsec_cfg = cfg; pfvf->netdev->features |= NETIF_F_HW_MACSEC; pfvf->netdev->macsec_ops = &cn10k_mcs_ops; mutex_lock(&mbox->lock); req = otx2_mbox_alloc_msg_mcs_intr_cfg(mbox); if (!req) goto fail; req->intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT; if (otx2_sync_mbox_msg(mbox)) goto fail; mutex_unlock(&mbox->lock); return 0; fail: dev_err(pfvf->dev, "Cannot notify PN wrapped event\n"); mutex_unlock(&mbox->lock); return 0; } void cn10k_mcs_free(struct otx2_nic *pfvf) { if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag)) return; cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, 0, true); cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, 0, true); kfree(pfvf->macsec_cfg); pfvf->macsec_cfg = NULL; }
linux-master
drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
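A standalone sketch of how the 64-bit Rx SECY policy word used by cn10k_mcs_write_rx_secy() is assembled from the MCS_RX_SECY_PLCY_* bit fields defined at the top of this file, using plain shifts in place of the kernel's FIELD_PREP(); the replay-window, cipher and validate-frames values in main() are assumptions for illustration.

#include <stdio.h>
#include <stdint.h>

/* Same bit layout as the MCS_RX_SECY_PLCY_* definitions above, written as
 * explicit shifts so the sketch builds outside the kernel. */
#define PLCY_RW_SHIFT   18          /* replay window, bits 49:18 */
#define PLCY_RP         (1ULL << 17)
#define PLCY_AUTH_ENA   (1ULL << 16)
#define PLCY_CIP_SHIFT  5           /* cipher id, bits 8:5 */
#define PLCY_VAL_SHIFT  1           /* validate_frames, bits 2:1 */
#define PLCY_ENA        1ULL

/* Cipher ids as used by the driver */
#define GCM_AES_128     0ULL
#define GCM_AES_256     1ULL

static uint64_t build_rx_secy_policy(uint32_t replay_window, int replay_protect,
				     uint64_t cipher, uint64_t validate_frames)
{
	uint64_t policy = (uint64_t)replay_window << PLCY_RW_SHIFT;

	if (replay_protect)
		policy |= PLCY_RP;
	policy |= PLCY_AUTH_ENA;
	policy |= cipher << PLCY_CIP_SHIFT;
	policy |= validate_frames << PLCY_VAL_SHIFT;
	policy |= PLCY_ENA;

	return policy;
}

int main(void)
{
	/* 64-frame replay window, replay protection on, AES-GCM-256,
	 * strict frame validation (2) -- illustrative values only. */
	printf("policy = 0x%016llx\n",
	       (unsigned long long)build_rx_secy_policy(64, 1, GCM_AES_256, 2));
	return 0;
}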
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU PF/VF Netdev Devlink * * Copyright (C) 2021 Marvell. */ #include "otx2_common.h" /* Devlink Params APIs */ static int otx2_dl_mcam_count_validate(struct devlink *devlink, u32 id, union devlink_param_value val, struct netlink_ext_ack *extack) { struct otx2_devlink *otx2_dl = devlink_priv(devlink); struct otx2_nic *pfvf = otx2_dl->pfvf; struct otx2_flow_config *flow_cfg; if (!pfvf->flow_cfg) { NL_SET_ERR_MSG_MOD(extack, "pfvf->flow_cfg not initialized"); return -EINVAL; } flow_cfg = pfvf->flow_cfg; if (flow_cfg && flow_cfg->nr_flows) { NL_SET_ERR_MSG_MOD(extack, "Cannot modify count when there are active rules"); return -EINVAL; } return 0; } static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id, struct devlink_param_gset_ctx *ctx) { struct otx2_devlink *otx2_dl = devlink_priv(devlink); struct otx2_nic *pfvf = otx2_dl->pfvf; if (!pfvf->flow_cfg) return 0; otx2_alloc_mcam_entries(pfvf, ctx->val.vu16); return 0; } static int otx2_dl_mcam_count_get(struct devlink *devlink, u32 id, struct devlink_param_gset_ctx *ctx) { struct otx2_devlink *otx2_dl = devlink_priv(devlink); struct otx2_nic *pfvf = otx2_dl->pfvf; struct otx2_flow_config *flow_cfg; if (!pfvf->flow_cfg) { ctx->val.vu16 = 0; return 0; } flow_cfg = pfvf->flow_cfg; ctx->val.vu16 = flow_cfg->max_flows; return 0; } enum otx2_dl_param_id { OTX2_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, OTX2_DEVLINK_PARAM_ID_MCAM_COUNT, }; static const struct devlink_param otx2_dl_params[] = { DEVLINK_PARAM_DRIVER(OTX2_DEVLINK_PARAM_ID_MCAM_COUNT, "mcam_count", DEVLINK_PARAM_TYPE_U16, BIT(DEVLINK_PARAM_CMODE_RUNTIME), otx2_dl_mcam_count_get, otx2_dl_mcam_count_set, otx2_dl_mcam_count_validate), }; static const struct devlink_ops otx2_devlink_ops = { }; int otx2_register_dl(struct otx2_nic *pfvf) { struct otx2_devlink *otx2_dl; struct devlink *dl; int err; dl = devlink_alloc(&otx2_devlink_ops, sizeof(struct otx2_devlink), pfvf->dev); if (!dl) { dev_warn(pfvf->dev, "devlink_alloc failed\n"); return -ENOMEM; } otx2_dl = devlink_priv(dl); otx2_dl->dl = dl; otx2_dl->pfvf = pfvf; pfvf->dl = otx2_dl; err = devlink_params_register(dl, otx2_dl_params, ARRAY_SIZE(otx2_dl_params)); if (err) { dev_err(pfvf->dev, "devlink params register failed with error %d", err); goto err_dl; } devlink_register(dl); return 0; err_dl: devlink_free(dl); return err; } void otx2_unregister_dl(struct otx2_nic *pfvf) { struct otx2_devlink *otx2_dl = pfvf->dl; struct devlink *dl = otx2_dl->dl; devlink_unregister(dl); devlink_params_unregister(dl, otx2_dl_params, ARRAY_SIZE(otx2_dl_params)); devlink_free(dl); }
linux-master
drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Ethernet driver * * Copyright (C) 2021 Marvell. * */ #include "otx2_common.h" static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac, u32 *dmac_index) { struct cgx_mac_addr_add_req *req; struct cgx_mac_addr_add_rsp *rsp; int err; mutex_lock(&pf->mbox.lock); req = otx2_mbox_alloc_msg_cgx_mac_addr_add(&pf->mbox); if (!req) { mutex_unlock(&pf->mbox.lock); return -ENOMEM; } ether_addr_copy(req->mac_addr, mac); err = otx2_sync_mbox_msg(&pf->mbox); if (!err) { rsp = (struct cgx_mac_addr_add_rsp *) otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); *dmac_index = rsp->index; } mutex_unlock(&pf->mbox.lock); return err; } static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf, u32 *dmac_index) { struct cgx_mac_addr_set_or_get *req; struct cgx_mac_addr_set_or_get *rsp; int err; mutex_lock(&pf->mbox.lock); req = otx2_mbox_alloc_msg_cgx_mac_addr_set(&pf->mbox); if (!req) { mutex_unlock(&pf->mbox.lock); return -ENOMEM; } req->index = *dmac_index; ether_addr_copy(req->mac_addr, pf->netdev->dev_addr); err = otx2_sync_mbox_msg(&pf->mbox); if (err) goto out; rsp = (struct cgx_mac_addr_set_or_get *) otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); if (IS_ERR_OR_NULL(rsp)) { err = -EINVAL; goto out; } *dmac_index = rsp->index; out: mutex_unlock(&pf->mbox.lock); return err; } int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos) { u32 *dmacindex; /* Store dmacindex returned by CGX/RPM driver which will * be used for macaddr update/remove */ dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos]; if (ether_addr_equal(mac, pf->netdev->dev_addr)) return otx2_dmacflt_add_pfmac(pf, dmacindex); else return otx2_dmacflt_do_add(pf, mac, dmacindex); } static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac, u32 dmac_index) { struct cgx_mac_addr_del_req *req; int err; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_cgx_mac_addr_del(&pfvf->mbox); if (!req) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } req->index = dmac_index; err = otx2_sync_mbox_msg(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock); return err; } static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf, u32 dmac_index) { struct cgx_mac_addr_reset_req *req; int err; mutex_lock(&pf->mbox.lock); req = otx2_mbox_alloc_msg_cgx_mac_addr_reset(&pf->mbox); if (!req) { mutex_unlock(&pf->mbox.lock); return -ENOMEM; } req->index = dmac_index; err = otx2_sync_mbox_msg(&pf->mbox); mutex_unlock(&pf->mbox.lock); return err; } int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u32 bit_pos) { u32 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos]; if (ether_addr_equal(mac, pf->netdev->dev_addr)) return otx2_dmacflt_remove_pfmac(pf, dmacindex); else return otx2_dmacflt_do_remove(pf, mac, dmacindex); } /* CGX/RPM blocks support max unicast entries of 32. 
* on typical configuration MAC block associated * with 4 lmacs, each lmac will have 8 dmac entries */ int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf) { struct cgx_max_dmac_entries_get_rsp *rsp; struct msg_req *msg; int err; mutex_lock(&pf->mbox.lock); msg = otx2_mbox_alloc_msg_cgx_mac_max_entries_get(&pf->mbox); if (!msg) { mutex_unlock(&pf->mbox.lock); return -ENOMEM; } err = otx2_sync_mbox_msg(&pf->mbox); if (err) goto out; rsp = (struct cgx_max_dmac_entries_get_rsp *) otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr); if (IS_ERR_OR_NULL(rsp)) { err = -EINVAL; goto out; } pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters; out: mutex_unlock(&pf->mbox.lock); return err; } int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos) { struct cgx_mac_addr_update_req *req; struct cgx_mac_addr_update_rsp *rsp; int rc; mutex_lock(&pf->mbox.lock); req = otx2_mbox_alloc_msg_cgx_mac_addr_update(&pf->mbox); if (!req) { mutex_unlock(&pf->mbox.lock); return -ENOMEM; } ether_addr_copy(req->mac_addr, mac); req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos]; /* check the response and change index */ rc = otx2_sync_mbox_msg(&pf->mbox); if (rc) goto out; rsp = (struct cgx_mac_addr_update_rsp *) otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); pf->flow_cfg->bmap_to_dmacindex[bit_pos] = rsp->index; out: mutex_unlock(&pf->mbox.lock); return rc; }
linux-master
drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
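The comment at the top of otx2_dmacflt_get_max_cnt() implies a simple split: 32 unicast DMAC filter entries per CGX/RPM MAC block, shared across its lmacs. A minimal user-space sketch of that arithmetic follows; the 32-entry limit and the 4-lmac split are assumptions taken from the comment, while the driver itself learns the real per-interface count from the cgx_mac_max_entries_get mailbox response (max_dmac_filters).

#include <stdio.h>

/* Standalone illustration only; in the driver the per-interface count comes
 * from the mailbox response handled in otx2_dmacflt_get_max_cnt().
 */
int main(void)
{
	unsigned int block_entries = 32;	/* assumed CGX/RPM unicast filter limit */
	unsigned int lmacs_per_block = 4;	/* assumed lmacs sharing one MAC block */

	printf("DMAC filter entries per lmac: %u\n", block_entries / lmacs_per_block);
	return 0;
}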
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Ethernet driver * * Copyright (C) 2023 Marvell. * */ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/inetdevice.h> #include <linux/bitfield.h> #include "otx2_common.h" #include "cn10k.h" #include "qos.h" #define OTX2_QOS_QID_INNER 0xFFFFU #define OTX2_QOS_QID_NONE 0xFFFEU #define OTX2_QOS_ROOT_CLASSID 0xFFFFFFFF #define OTX2_QOS_CLASS_NONE 0 #define OTX2_QOS_DEFAULT_PRIO 0xF #define OTX2_QOS_INVALID_SQ 0xFFFF #define OTX2_QOS_INVALID_TXSCHQ_IDX 0xFFFF #define CN10K_MAX_RR_WEIGHT GENMASK_ULL(13, 0) #define OTX2_MAX_RR_QUANTUM GENMASK_ULL(23, 0) static void otx2_qos_update_tx_netdev_queues(struct otx2_nic *pfvf) { struct otx2_hw *hw = &pfvf->hw; int tx_queues, qos_txqs, err; qos_txqs = bitmap_weight(pfvf->qos.qos_sq_bmap, OTX2_QOS_MAX_LEAF_NODES); tx_queues = hw->tx_queues + qos_txqs; err = netif_set_real_num_tx_queues(pfvf->netdev, tx_queues); if (err) { netdev_err(pfvf->netdev, "Failed to set no of Tx queues: %d\n", tx_queues); return; } } static void otx2_qos_get_regaddr(struct otx2_qos_node *node, struct nix_txschq_config *cfg, int index) { if (node->level == NIX_TXSCH_LVL_SMQ) { cfg->reg[index++] = NIX_AF_MDQX_PARENT(node->schq); cfg->reg[index++] = NIX_AF_MDQX_SCHEDULE(node->schq); cfg->reg[index++] = NIX_AF_MDQX_PIR(node->schq); cfg->reg[index] = NIX_AF_MDQX_CIR(node->schq); } else if (node->level == NIX_TXSCH_LVL_TL4) { cfg->reg[index++] = NIX_AF_TL4X_PARENT(node->schq); cfg->reg[index++] = NIX_AF_TL4X_SCHEDULE(node->schq); cfg->reg[index++] = NIX_AF_TL4X_PIR(node->schq); cfg->reg[index] = NIX_AF_TL4X_CIR(node->schq); } else if (node->level == NIX_TXSCH_LVL_TL3) { cfg->reg[index++] = NIX_AF_TL3X_PARENT(node->schq); cfg->reg[index++] = NIX_AF_TL3X_SCHEDULE(node->schq); cfg->reg[index++] = NIX_AF_TL3X_PIR(node->schq); cfg->reg[index] = NIX_AF_TL3X_CIR(node->schq); } else if (node->level == NIX_TXSCH_LVL_TL2) { cfg->reg[index++] = NIX_AF_TL2X_PARENT(node->schq); cfg->reg[index++] = NIX_AF_TL2X_SCHEDULE(node->schq); cfg->reg[index++] = NIX_AF_TL2X_PIR(node->schq); cfg->reg[index] = NIX_AF_TL2X_CIR(node->schq); } } static int otx2_qos_quantum_to_dwrr_weight(struct otx2_nic *pfvf, u32 quantum) { u32 weight; weight = quantum / pfvf->hw.dwrr_mtu; if (quantum % pfvf->hw.dwrr_mtu) weight += 1; return weight; } static void otx2_config_sched_shaping(struct otx2_nic *pfvf, struct otx2_qos_node *node, struct nix_txschq_config *cfg, int *num_regs) { u32 rr_weight; u32 quantum; u64 maxrate; otx2_qos_get_regaddr(node, cfg, *num_regs); /* configure parent txschq */ cfg->regval[*num_regs] = node->parent->schq << 16; (*num_regs)++; /* configure prio/quantum */ if (node->qid == OTX2_QOS_QID_NONE) { cfg->regval[*num_regs] = node->prio << 24 | mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); (*num_regs)++; return; } /* configure priority/quantum */ if (node->is_static) { cfg->regval[*num_regs] = (node->schq - node->parent->prio_anchor) << 24; } else { quantum = node->quantum ? node->quantum : pfvf->tx_max_pktlen; rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum); cfg->regval[*num_regs] = node->parent->child_dwrr_prio << 24 | rr_weight; } (*num_regs)++; /* configure PIR */ maxrate = (node->rate > node->ceil) ? 
node->rate : node->ceil; cfg->regval[*num_regs] = otx2_get_txschq_rate_regval(pfvf, maxrate, 65536); (*num_regs)++; /* Don't configure CIR when both CIR+PIR not supported * On 96xx, CIR + PIR + RED_ALGO=STALL causes deadlock */ if (!test_bit(QOS_CIR_PIR_SUPPORT, &pfvf->hw.cap_flag)) return; cfg->regval[*num_regs] = otx2_get_txschq_rate_regval(pfvf, node->rate, 65536); (*num_regs)++; } static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf, struct otx2_qos_node *node, struct nix_txschq_config *cfg) { struct otx2_hw *hw = &pfvf->hw; int num_regs = 0; u8 level; level = node->level; /* program txschq registers */ if (level == NIX_TXSCH_LVL_SMQ) { cfg->reg[num_regs] = NIX_AF_SMQX_CFG(node->schq); cfg->regval[num_regs] = ((u64)pfvf->tx_max_pktlen << 8) | OTX2_MIN_MTU; cfg->regval[num_regs] |= (0x20ULL << 51) | (0x80ULL << 39) | (0x2ULL << 36); num_regs++; otx2_config_sched_shaping(pfvf, node, cfg, &num_regs); } else if (level == NIX_TXSCH_LVL_TL4) { otx2_config_sched_shaping(pfvf, node, cfg, &num_regs); } else if (level == NIX_TXSCH_LVL_TL3) { /* configure link cfg */ if (level == pfvf->qos.link_cfg_lvl) { cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link); cfg->regval[num_regs] = BIT_ULL(13) | BIT_ULL(12); num_regs++; } otx2_config_sched_shaping(pfvf, node, cfg, &num_regs); } else if (level == NIX_TXSCH_LVL_TL2) { /* configure link cfg */ if (level == pfvf->qos.link_cfg_lvl) { cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link); cfg->regval[num_regs] = BIT_ULL(13) | BIT_ULL(12); num_regs++; } /* check if node is root */ if (node->qid == OTX2_QOS_QID_INNER && !node->parent) { cfg->reg[num_regs] = NIX_AF_TL2X_SCHEDULE(node->schq); cfg->regval[num_regs] = TXSCH_TL1_DFLT_RR_PRIO << 24 | mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); num_regs++; goto txschq_cfg_out; } otx2_config_sched_shaping(pfvf, node, cfg, &num_regs); } txschq_cfg_out: cfg->num_regs = num_regs; } static int otx2_qos_txschq_set_parent_topology(struct otx2_nic *pfvf, struct otx2_qos_node *parent) { struct mbox *mbox = &pfvf->mbox; struct nix_txschq_config *cfg; int rc; if (parent->level == NIX_TXSCH_LVL_MDQ) return 0; mutex_lock(&mbox->lock); cfg = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox); if (!cfg) { mutex_unlock(&mbox->lock); return -ENOMEM; } cfg->lvl = parent->level; if (parent->level == NIX_TXSCH_LVL_TL4) cfg->reg[0] = NIX_AF_TL4X_TOPOLOGY(parent->schq); else if (parent->level == NIX_TXSCH_LVL_TL3) cfg->reg[0] = NIX_AF_TL3X_TOPOLOGY(parent->schq); else if (parent->level == NIX_TXSCH_LVL_TL2) cfg->reg[0] = NIX_AF_TL2X_TOPOLOGY(parent->schq); else if (parent->level == NIX_TXSCH_LVL_TL1) cfg->reg[0] = NIX_AF_TL1X_TOPOLOGY(parent->schq); cfg->regval[0] = (u64)parent->prio_anchor << 32; cfg->regval[0] |= ((parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) ? 
parent->child_dwrr_prio : 0) << 1; cfg->num_regs++; rc = otx2_sync_mbox_msg(&pfvf->mbox); mutex_unlock(&mbox->lock); return rc; } static void otx2_qos_free_hw_node_schq(struct otx2_nic *pfvf, struct otx2_qos_node *parent) { struct otx2_qos_node *node; list_for_each_entry_reverse(node, &parent->child_schq_list, list) otx2_txschq_free_one(pfvf, node->level, node->schq); } static void otx2_qos_free_hw_node(struct otx2_nic *pfvf, struct otx2_qos_node *parent) { struct otx2_qos_node *node, *tmp; list_for_each_entry_safe(node, tmp, &parent->child_list, list) { otx2_qos_free_hw_node(pfvf, node); otx2_qos_free_hw_node_schq(pfvf, node); otx2_txschq_free_one(pfvf, node->level, node->schq); } } static void otx2_qos_free_hw_cfg(struct otx2_nic *pfvf, struct otx2_qos_node *node) { mutex_lock(&pfvf->qos.qos_lock); /* free child node hw mappings */ otx2_qos_free_hw_node(pfvf, node); otx2_qos_free_hw_node_schq(pfvf, node); /* free node hw mappings */ otx2_txschq_free_one(pfvf, node->level, node->schq); mutex_unlock(&pfvf->qos.qos_lock); } static void otx2_qos_sw_node_delete(struct otx2_nic *pfvf, struct otx2_qos_node *node) { hash_del_rcu(&node->hlist); if (node->qid != OTX2_QOS_QID_INNER && node->qid != OTX2_QOS_QID_NONE) { __clear_bit(node->qid, pfvf->qos.qos_sq_bmap); otx2_qos_update_tx_netdev_queues(pfvf); } list_del(&node->list); kfree(node); } static void otx2_qos_free_sw_node_schq(struct otx2_nic *pfvf, struct otx2_qos_node *parent) { struct otx2_qos_node *node, *tmp; list_for_each_entry_safe(node, tmp, &parent->child_schq_list, list) { list_del(&node->list); kfree(node); } } static void __otx2_qos_free_sw_node(struct otx2_nic *pfvf, struct otx2_qos_node *parent) { struct otx2_qos_node *node, *tmp; list_for_each_entry_safe(node, tmp, &parent->child_list, list) { __otx2_qos_free_sw_node(pfvf, node); otx2_qos_free_sw_node_schq(pfvf, node); otx2_qos_sw_node_delete(pfvf, node); } } static void otx2_qos_free_sw_node(struct otx2_nic *pfvf, struct otx2_qos_node *node) { mutex_lock(&pfvf->qos.qos_lock); __otx2_qos_free_sw_node(pfvf, node); otx2_qos_free_sw_node_schq(pfvf, node); otx2_qos_sw_node_delete(pfvf, node); mutex_unlock(&pfvf->qos.qos_lock); } static void otx2_qos_destroy_node(struct otx2_nic *pfvf, struct otx2_qos_node *node) { otx2_qos_free_hw_cfg(pfvf, node); otx2_qos_free_sw_node(pfvf, node); } static void otx2_qos_fill_cfg_schq(struct otx2_qos_node *parent, struct otx2_qos_cfg *cfg) { struct otx2_qos_node *node; list_for_each_entry(node, &parent->child_schq_list, list) cfg->schq[node->level]++; } static void otx2_qos_fill_cfg_tl(struct otx2_qos_node *parent, struct otx2_qos_cfg *cfg) { struct otx2_qos_node *node; list_for_each_entry(node, &parent->child_list, list) { otx2_qos_fill_cfg_tl(node, cfg); otx2_qos_fill_cfg_schq(node, cfg); } /* Assign the required number of transmit schedular queues under the * given class */ cfg->schq_contig[parent->level - 1] += parent->child_dwrr_cnt + parent->max_static_prio + 1; } static void otx2_qos_prepare_txschq_cfg(struct otx2_nic *pfvf, struct otx2_qos_node *parent, struct otx2_qos_cfg *cfg) { mutex_lock(&pfvf->qos.qos_lock); otx2_qos_fill_cfg_tl(parent, cfg); mutex_unlock(&pfvf->qos.qos_lock); } static void otx2_qos_read_txschq_cfg_schq(struct otx2_qos_node *parent, struct otx2_qos_cfg *cfg) { struct otx2_qos_node *node; int cnt; list_for_each_entry(node, &parent->child_schq_list, list) { cnt = cfg->dwrr_node_pos[node->level]; cfg->schq_list[node->level][cnt] = node->schq; cfg->schq[node->level]++; cfg->dwrr_node_pos[node->level]++; } } static void 
otx2_qos_read_txschq_cfg_tl(struct otx2_qos_node *parent, struct otx2_qos_cfg *cfg) { struct otx2_qos_node *node; int cnt; list_for_each_entry(node, &parent->child_list, list) { otx2_qos_read_txschq_cfg_tl(node, cfg); cnt = cfg->static_node_pos[node->level]; cfg->schq_contig_list[node->level][cnt] = node->schq; cfg->schq_contig[node->level]++; cfg->static_node_pos[node->level]++; otx2_qos_read_txschq_cfg_schq(node, cfg); } } static void otx2_qos_read_txschq_cfg(struct otx2_nic *pfvf, struct otx2_qos_node *node, struct otx2_qos_cfg *cfg) { mutex_lock(&pfvf->qos.qos_lock); otx2_qos_read_txschq_cfg_tl(node, cfg); mutex_unlock(&pfvf->qos.qos_lock); } static struct otx2_qos_node * otx2_qos_alloc_root(struct otx2_nic *pfvf) { struct otx2_qos_node *node; node = kzalloc(sizeof(*node), GFP_KERNEL); if (!node) return ERR_PTR(-ENOMEM); node->parent = NULL; if (!is_otx2_vf(pfvf->pcifunc)) { node->level = NIX_TXSCH_LVL_TL1; } else { node->level = NIX_TXSCH_LVL_TL2; node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO; } WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER); node->classid = OTX2_QOS_ROOT_CLASSID; hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, node->classid); list_add_tail(&node->list, &pfvf->qos.qos_tree); INIT_LIST_HEAD(&node->child_list); INIT_LIST_HEAD(&node->child_schq_list); return node; } static int otx2_qos_add_child_node(struct otx2_qos_node *parent, struct otx2_qos_node *node) { struct list_head *head = &parent->child_list; struct otx2_qos_node *tmp_node; struct list_head *tmp; if (node->prio > parent->max_static_prio) parent->max_static_prio = node->prio; for (tmp = head->next; tmp != head; tmp = tmp->next) { tmp_node = list_entry(tmp, struct otx2_qos_node, list); if (tmp_node->prio == node->prio && tmp_node->is_static) return -EEXIST; if (tmp_node->prio > node->prio) { list_add_tail(&node->list, tmp); return 0; } } list_add_tail(&node->list, head); return 0; } static int otx2_qos_alloc_txschq_node(struct otx2_nic *pfvf, struct otx2_qos_node *node) { struct otx2_qos_node *txschq_node, *parent, *tmp; int lvl; parent = node; for (lvl = node->level - 1; lvl >= NIX_TXSCH_LVL_MDQ; lvl--) { txschq_node = kzalloc(sizeof(*txschq_node), GFP_KERNEL); if (!txschq_node) goto err_out; txschq_node->parent = parent; txschq_node->level = lvl; txschq_node->classid = OTX2_QOS_CLASS_NONE; WRITE_ONCE(txschq_node->qid, OTX2_QOS_QID_NONE); txschq_node->rate = 0; txschq_node->ceil = 0; txschq_node->prio = 0; txschq_node->quantum = 0; txschq_node->is_static = true; txschq_node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO; txschq_node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX; mutex_lock(&pfvf->qos.qos_lock); list_add_tail(&txschq_node->list, &node->child_schq_list); mutex_unlock(&pfvf->qos.qos_lock); INIT_LIST_HEAD(&txschq_node->child_list); INIT_LIST_HEAD(&txschq_node->child_schq_list); parent = txschq_node; } return 0; err_out: list_for_each_entry_safe(txschq_node, tmp, &node->child_schq_list, list) { list_del(&txschq_node->list); kfree(txschq_node); } return -ENOMEM; } static struct otx2_qos_node * otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf, struct otx2_qos_node *parent, u16 classid, u32 prio, u64 rate, u64 ceil, u32 quantum, u16 qid, bool static_cfg) { struct otx2_qos_node *node; int err; node = kzalloc(sizeof(*node), GFP_KERNEL); if (!node) return ERR_PTR(-ENOMEM); node->parent = parent; node->level = parent->level - 1; node->classid = classid; WRITE_ONCE(node->qid, qid); node->rate = otx2_convert_rate(rate); node->ceil = otx2_convert_rate(ceil); node->prio = prio; node->quantum = quantum; node->is_static = 
static_cfg; node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO; node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX; __set_bit(qid, pfvf->qos.qos_sq_bmap); hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, classid); mutex_lock(&pfvf->qos.qos_lock); err = otx2_qos_add_child_node(parent, node); if (err) { mutex_unlock(&pfvf->qos.qos_lock); return ERR_PTR(err); } mutex_unlock(&pfvf->qos.qos_lock); INIT_LIST_HEAD(&node->child_list); INIT_LIST_HEAD(&node->child_schq_list); err = otx2_qos_alloc_txschq_node(pfvf, node); if (err) { otx2_qos_sw_node_delete(pfvf, node); return ERR_PTR(-ENOMEM); } return node; } static struct otx2_qos_node * otx2_sw_node_find(struct otx2_nic *pfvf, u32 classid) { struct otx2_qos_node *node = NULL; hash_for_each_possible(pfvf->qos.qos_hlist, node, hlist, classid) { if (node->classid == classid) break; } return node; } static struct otx2_qos_node * otx2_sw_node_find_rcu(struct otx2_nic *pfvf, u32 classid) { struct otx2_qos_node *node = NULL; hash_for_each_possible_rcu(pfvf->qos.qos_hlist, node, hlist, classid) { if (node->classid == classid) break; } return node; } int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid) { struct otx2_qos_node *node; u16 qid; int res; node = otx2_sw_node_find_rcu(pfvf, classid); if (!node) { res = -ENOENT; goto out; } qid = READ_ONCE(node->qid); if (qid == OTX2_QOS_QID_INNER) { res = -EINVAL; goto out; } res = pfvf->hw.tx_queues + qid; out: return res; } static int otx2_qos_txschq_config(struct otx2_nic *pfvf, struct otx2_qos_node *node) { struct mbox *mbox = &pfvf->mbox; struct nix_txschq_config *req; int rc; mutex_lock(&mbox->lock); req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox); if (!req) { mutex_unlock(&mbox->lock); return -ENOMEM; } req->lvl = node->level; __otx2_qos_txschq_cfg(pfvf, node, req); rc = otx2_sync_mbox_msg(&pfvf->mbox); mutex_unlock(&mbox->lock); return rc; } static int otx2_qos_txschq_alloc(struct otx2_nic *pfvf, struct otx2_qos_cfg *cfg) { struct nix_txsch_alloc_req *req; struct nix_txsch_alloc_rsp *rsp; struct mbox *mbox = &pfvf->mbox; int lvl, rc, schq; mutex_lock(&mbox->lock); req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox); if (!req) { mutex_unlock(&mbox->lock); return -ENOMEM; } for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { req->schq[lvl] = cfg->schq[lvl]; req->schq_contig[lvl] = cfg->schq_contig[lvl]; } rc = otx2_sync_mbox_msg(&pfvf->mbox); if (rc) { mutex_unlock(&mbox->lock); return rc; } rsp = (struct nix_txsch_alloc_rsp *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); if (IS_ERR(rsp)) { rc = PTR_ERR(rsp); goto out; } for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { for (schq = 0; schq < rsp->schq_contig[lvl]; schq++) { cfg->schq_contig_list[lvl][schq] = rsp->schq_contig_list[lvl][schq]; } } for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { for (schq = 0; schq < rsp->schq[lvl]; schq++) { cfg->schq_list[lvl][schq] = rsp->schq_list[lvl][schq]; } } pfvf->qos.link_cfg_lvl = rsp->link_cfg_lvl; pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio; out: mutex_unlock(&mbox->lock); return rc; } static void otx2_qos_free_unused_txschq(struct otx2_nic *pfvf, struct otx2_qos_cfg *cfg) { int lvl, idx, schq; for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) { if (!cfg->schq_index_used[lvl][idx]) { schq = cfg->schq_contig_list[lvl][idx]; otx2_txschq_free_one(pfvf, lvl, schq); } } } } static void otx2_qos_txschq_fill_cfg_schq(struct otx2_nic *pfvf, struct otx2_qos_node *node, struct otx2_qos_cfg *cfg) { struct otx2_qos_node *tmp; int cnt; 
list_for_each_entry(tmp, &node->child_schq_list, list) { cnt = cfg->dwrr_node_pos[tmp->level]; tmp->schq = cfg->schq_list[tmp->level][cnt]; cfg->dwrr_node_pos[tmp->level]++; } } static void otx2_qos_txschq_fill_cfg_tl(struct otx2_nic *pfvf, struct otx2_qos_node *node, struct otx2_qos_cfg *cfg) { struct otx2_qos_node *tmp; int cnt; list_for_each_entry(tmp, &node->child_list, list) { otx2_qos_txschq_fill_cfg_tl(pfvf, tmp, cfg); cnt = cfg->static_node_pos[tmp->level]; tmp->schq = cfg->schq_contig_list[tmp->level][tmp->txschq_idx]; cfg->schq_index_used[tmp->level][tmp->txschq_idx] = true; if (cnt == 0) node->prio_anchor = cfg->schq_contig_list[tmp->level][0]; cfg->static_node_pos[tmp->level]++; otx2_qos_txschq_fill_cfg_schq(pfvf, tmp, cfg); } } static void otx2_qos_txschq_fill_cfg(struct otx2_nic *pfvf, struct otx2_qos_node *node, struct otx2_qos_cfg *cfg) { mutex_lock(&pfvf->qos.qos_lock); otx2_qos_txschq_fill_cfg_tl(pfvf, node, cfg); otx2_qos_txschq_fill_cfg_schq(pfvf, node, cfg); otx2_qos_free_unused_txschq(pfvf, cfg); mutex_unlock(&pfvf->qos.qos_lock); } static void __otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf, struct otx2_qos_node *tmp, unsigned long *child_idx_bmap, int child_cnt) { int idx; if (tmp->txschq_idx != OTX2_QOS_INVALID_TXSCHQ_IDX) return; /* assign static nodes 1:1 prio mapping first, then remaining nodes */ for (idx = 0; idx < child_cnt; idx++) { if (tmp->is_static && tmp->prio == idx && !test_bit(idx, child_idx_bmap)) { tmp->txschq_idx = idx; set_bit(idx, child_idx_bmap); return; } else if (!tmp->is_static && idx >= tmp->prio && !test_bit(idx, child_idx_bmap)) { tmp->txschq_idx = idx; set_bit(idx, child_idx_bmap); return; } } } static int otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf, struct otx2_qos_node *node) { unsigned long *child_idx_bmap; struct otx2_qos_node *tmp; int child_cnt; list_for_each_entry(tmp, &node->child_list, list) tmp->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX; /* allocate child index array */ child_cnt = node->child_dwrr_cnt + node->max_static_prio + 1; child_idx_bmap = kcalloc(BITS_TO_LONGS(child_cnt), sizeof(unsigned long), GFP_KERNEL); if (!child_idx_bmap) return -ENOMEM; list_for_each_entry(tmp, &node->child_list, list) otx2_qos_assign_base_idx_tl(pfvf, tmp); /* assign base index of static priority children first */ list_for_each_entry(tmp, &node->child_list, list) { if (!tmp->is_static) continue; __otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap, child_cnt); } /* assign base index of dwrr priority children */ list_for_each_entry(tmp, &node->child_list, list) __otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap, child_cnt); kfree(child_idx_bmap); return 0; } static int otx2_qos_assign_base_idx(struct otx2_nic *pfvf, struct otx2_qos_node *node) { int ret = 0; mutex_lock(&pfvf->qos.qos_lock); ret = otx2_qos_assign_base_idx_tl(pfvf, node); mutex_unlock(&pfvf->qos.qos_lock); return ret; } static int otx2_qos_txschq_push_cfg_schq(struct otx2_nic *pfvf, struct otx2_qos_node *node, struct otx2_qos_cfg *cfg) { struct otx2_qos_node *tmp; int ret; list_for_each_entry(tmp, &node->child_schq_list, list) { ret = otx2_qos_txschq_config(pfvf, tmp); if (ret) return -EIO; ret = otx2_qos_txschq_set_parent_topology(pfvf, tmp->parent); if (ret) return -EIO; } return 0; } static int otx2_qos_txschq_push_cfg_tl(struct otx2_nic *pfvf, struct otx2_qos_node *node, struct otx2_qos_cfg *cfg) { struct otx2_qos_node *tmp; int ret; list_for_each_entry(tmp, &node->child_list, list) { ret = otx2_qos_txschq_push_cfg_tl(pfvf, tmp, cfg); if (ret) return -EIO; ret 
= otx2_qos_txschq_config(pfvf, tmp); if (ret) return -EIO; ret = otx2_qos_txschq_push_cfg_schq(pfvf, tmp, cfg); if (ret) return -EIO; } ret = otx2_qos_txschq_set_parent_topology(pfvf, node); if (ret) return -EIO; return 0; } static int otx2_qos_txschq_push_cfg(struct otx2_nic *pfvf, struct otx2_qos_node *node, struct otx2_qos_cfg *cfg) { int ret; mutex_lock(&pfvf->qos.qos_lock); ret = otx2_qos_txschq_push_cfg_tl(pfvf, node, cfg); if (ret) goto out; ret = otx2_qos_txschq_push_cfg_schq(pfvf, node, cfg); out: mutex_unlock(&pfvf->qos.qos_lock); return ret; } static int otx2_qos_txschq_update_config(struct otx2_nic *pfvf, struct otx2_qos_node *node, struct otx2_qos_cfg *cfg) { otx2_qos_txschq_fill_cfg(pfvf, node, cfg); return otx2_qos_txschq_push_cfg(pfvf, node, cfg); } static int otx2_qos_txschq_update_root_cfg(struct otx2_nic *pfvf, struct otx2_qos_node *root, struct otx2_qos_cfg *cfg) { root->schq = cfg->schq_list[root->level][0]; return otx2_qos_txschq_config(pfvf, root); } static void otx2_qos_free_cfg(struct otx2_nic *pfvf, struct otx2_qos_cfg *cfg) { int lvl, idx, schq; for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { for (idx = 0; idx < cfg->schq[lvl]; idx++) { schq = cfg->schq_list[lvl][idx]; otx2_txschq_free_one(pfvf, lvl, schq); } } for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) { if (cfg->schq_index_used[lvl][idx]) { schq = cfg->schq_contig_list[lvl][idx]; otx2_txschq_free_one(pfvf, lvl, schq); } } } } static void otx2_qos_enadis_sq(struct otx2_nic *pfvf, struct otx2_qos_node *node, u16 qid) { if (pfvf->qos.qid_to_sqmap[qid] != OTX2_QOS_INVALID_SQ) otx2_qos_disable_sq(pfvf, qid); pfvf->qos.qid_to_sqmap[qid] = node->schq; otx2_qos_enable_sq(pfvf, qid); } static void otx2_qos_update_smq_schq(struct otx2_nic *pfvf, struct otx2_qos_node *node, bool action) { struct otx2_qos_node *tmp; if (node->qid == OTX2_QOS_QID_INNER) return; list_for_each_entry(tmp, &node->child_schq_list, list) { if (tmp->level == NIX_TXSCH_LVL_MDQ) { if (action == QOS_SMQ_FLUSH) otx2_smq_flush(pfvf, tmp->schq); else otx2_qos_enadis_sq(pfvf, tmp, node->qid); } } } static void __otx2_qos_update_smq(struct otx2_nic *pfvf, struct otx2_qos_node *node, bool action) { struct otx2_qos_node *tmp; list_for_each_entry(tmp, &node->child_list, list) { __otx2_qos_update_smq(pfvf, tmp, action); if (tmp->qid == OTX2_QOS_QID_INNER) continue; if (tmp->level == NIX_TXSCH_LVL_MDQ) { if (action == QOS_SMQ_FLUSH) otx2_smq_flush(pfvf, tmp->schq); else otx2_qos_enadis_sq(pfvf, tmp, tmp->qid); } else { otx2_qos_update_smq_schq(pfvf, tmp, action); } } } static void otx2_qos_update_smq(struct otx2_nic *pfvf, struct otx2_qos_node *node, bool action) { mutex_lock(&pfvf->qos.qos_lock); __otx2_qos_update_smq(pfvf, node, action); otx2_qos_update_smq_schq(pfvf, node, action); mutex_unlock(&pfvf->qos.qos_lock); } static int otx2_qos_push_txschq_cfg(struct otx2_nic *pfvf, struct otx2_qos_node *node, struct otx2_qos_cfg *cfg) { int ret; ret = otx2_qos_txschq_alloc(pfvf, cfg); if (ret) return -ENOSPC; ret = otx2_qos_assign_base_idx(pfvf, node); if (ret) return -ENOMEM; if (!(pfvf->netdev->flags & IFF_UP)) { otx2_qos_txschq_fill_cfg(pfvf, node, cfg); return 0; } ret = otx2_qos_txschq_update_config(pfvf, node, cfg); if (ret) { otx2_qos_free_cfg(pfvf, cfg); return -EIO; } otx2_qos_update_smq(pfvf, node, QOS_CFG_SQ); return 0; } static int otx2_qos_update_tree(struct otx2_nic *pfvf, struct otx2_qos_node *node, struct otx2_qos_cfg *cfg) { otx2_qos_prepare_txschq_cfg(pfvf, node->parent, cfg); return 
otx2_qos_push_txschq_cfg(pfvf, node->parent, cfg); } static int otx2_qos_root_add(struct otx2_nic *pfvf, u16 htb_maj_id, u16 htb_defcls, struct netlink_ext_ack *extack) { struct otx2_qos_cfg *new_cfg; struct otx2_qos_node *root; int err; netdev_dbg(pfvf->netdev, "TC_HTB_CREATE: handle=0x%x defcls=0x%x\n", htb_maj_id, htb_defcls); root = otx2_qos_alloc_root(pfvf); if (IS_ERR(root)) { err = PTR_ERR(root); return err; } /* allocate txschq queue */ new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL); if (!new_cfg) { NL_SET_ERR_MSG_MOD(extack, "Memory allocation error"); err = -ENOMEM; goto free_root_node; } /* allocate htb root node */ new_cfg->schq[root->level] = 1; err = otx2_qos_txschq_alloc(pfvf, new_cfg); if (err) { NL_SET_ERR_MSG_MOD(extack, "Error allocating txschq"); goto free_root_node; } /* Update TL1 RR PRIO */ if (root->level == NIX_TXSCH_LVL_TL1) { root->child_dwrr_prio = pfvf->hw.txschq_aggr_lvl_rr_prio; netdev_dbg(pfvf->netdev, "TL1 DWRR Priority %d\n", root->child_dwrr_prio); } if (!(pfvf->netdev->flags & IFF_UP) || root->level == NIX_TXSCH_LVL_TL1) { root->schq = new_cfg->schq_list[root->level][0]; goto out; } /* update the txschq configuration in hw */ err = otx2_qos_txschq_update_root_cfg(pfvf, root, new_cfg); if (err) { NL_SET_ERR_MSG_MOD(extack, "Error updating txschq configuration"); goto txschq_free; } out: WRITE_ONCE(pfvf->qos.defcls, htb_defcls); /* Pairs with smp_load_acquire() in ndo_select_queue */ smp_store_release(&pfvf->qos.maj_id, htb_maj_id); kfree(new_cfg); return 0; txschq_free: otx2_qos_free_cfg(pfvf, new_cfg); free_root_node: kfree(new_cfg); otx2_qos_sw_node_delete(pfvf, root); return err; } static int otx2_qos_root_destroy(struct otx2_nic *pfvf) { struct otx2_qos_node *root; netdev_dbg(pfvf->netdev, "TC_HTB_DESTROY\n"); /* find root node */ root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID); if (!root) return -ENOENT; /* free the hw mappings */ otx2_qos_destroy_node(pfvf, root); return 0; } static int otx2_qos_validate_quantum(struct otx2_nic *pfvf, u32 quantum) { u32 rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum); int err = 0; /* Max Round robin weight supported by octeontx2 and CN10K * is different. Validate accordingly */ if (is_dev_otx2(pfvf->pdev)) err = (rr_weight > OTX2_MAX_RR_QUANTUM) ? 
-EINVAL : 0; else if (rr_weight > CN10K_MAX_RR_WEIGHT) err = -EINVAL; return err; } static int otx2_qos_validate_dwrr_cfg(struct otx2_qos_node *parent, struct netlink_ext_ack *extack, struct otx2_nic *pfvf, u64 prio, u64 quantum) { int err; err = otx2_qos_validate_quantum(pfvf, quantum); if (err) { NL_SET_ERR_MSG_MOD(extack, "Unsupported quantum value"); return err; } if (parent->child_dwrr_prio == OTX2_QOS_DEFAULT_PRIO) { parent->child_dwrr_prio = prio; } else if (prio != parent->child_dwrr_prio) { NL_SET_ERR_MSG_MOD(extack, "Only one DWRR group is allowed"); return -EOPNOTSUPP; } return 0; } static int otx2_qos_validate_configuration(struct otx2_qos_node *parent, struct netlink_ext_ack *extack, struct otx2_nic *pfvf, u64 prio, bool static_cfg) { if (prio == parent->child_dwrr_prio && static_cfg) { NL_SET_ERR_MSG_MOD(extack, "DWRR child group with same priority exists"); return -EEXIST; } if (static_cfg && test_bit(prio, parent->prio_bmap)) { NL_SET_ERR_MSG_MOD(extack, "Static priority child with same priority exists"); return -EEXIST; } return 0; } static void otx2_reset_dwrr_prio(struct otx2_qos_node *parent, u64 prio) { /* For PF, root node dwrr priority is static */ if (parent->level == NIX_TXSCH_LVL_TL1) return; if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) { parent->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO; clear_bit(prio, parent->prio_bmap); } } static bool is_qos_node_dwrr(struct otx2_qos_node *parent, struct otx2_nic *pfvf, u64 prio) { struct otx2_qos_node *node; bool ret = false; if (parent->child_dwrr_prio == prio) return true; mutex_lock(&pfvf->qos.qos_lock); list_for_each_entry(node, &parent->child_list, list) { if (prio == node->prio) { if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO && parent->child_dwrr_prio != prio) continue; if (otx2_qos_validate_quantum(pfvf, node->quantum)) { netdev_err(pfvf->netdev, "Unsupported quantum value for existing classid=0x%x quantum=%d prio=%d", node->classid, node->quantum, node->prio); break; } /* mark old node as dwrr */ node->is_static = false; parent->child_dwrr_cnt++; parent->child_static_cnt--; ret = true; break; } } mutex_unlock(&pfvf->qos.qos_lock); return ret; } static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid, u32 parent_classid, u64 rate, u64 ceil, u64 prio, u32 quantum, struct netlink_ext_ack *extack) { struct otx2_qos_cfg *old_cfg, *new_cfg; struct otx2_qos_node *node, *parent; int qid, ret, err; bool static_cfg; netdev_dbg(pfvf->netdev, "TC_HTB_LEAF_ALLOC_QUEUE: classid=0x%x parent_classid=0x%x rate=%lld ceil=%lld prio=%lld quantum=%d\n", classid, parent_classid, rate, ceil, prio, quantum); if (prio > OTX2_QOS_MAX_PRIO) { NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7"); ret = -EOPNOTSUPP; goto out; } if (!quantum || quantum > INT_MAX) { NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes"); ret = -EOPNOTSUPP; goto out; } /* get parent node */ parent = otx2_sw_node_find(pfvf, parent_classid); if (!parent) { NL_SET_ERR_MSG_MOD(extack, "parent node not found"); ret = -ENOENT; goto out; } if (parent->level == NIX_TXSCH_LVL_MDQ) { NL_SET_ERR_MSG_MOD(extack, "HTB qos max levels reached"); ret = -EOPNOTSUPP; goto out; } static_cfg = !is_qos_node_dwrr(parent, pfvf, prio); ret = otx2_qos_validate_configuration(parent, extack, pfvf, prio, static_cfg); if (ret) goto out; if (!static_cfg) { ret = otx2_qos_validate_dwrr_cfg(parent, extack, pfvf, prio, quantum); if (ret) goto out; } if (static_cfg) parent->child_static_cnt++; else parent->child_dwrr_cnt++; set_bit(prio, 
parent->prio_bmap); /* read current txschq configuration */ old_cfg = kzalloc(sizeof(*old_cfg), GFP_KERNEL); if (!old_cfg) { NL_SET_ERR_MSG_MOD(extack, "Memory allocation error"); ret = -ENOMEM; goto reset_prio; } otx2_qos_read_txschq_cfg(pfvf, parent, old_cfg); /* allocate a new sq */ qid = otx2_qos_get_qid(pfvf); if (qid < 0) { NL_SET_ERR_MSG_MOD(extack, "Reached max supported QOS SQ's"); ret = -ENOMEM; goto free_old_cfg; } /* Actual SQ mapping will be updated after SMQ alloc */ pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ; /* allocate and initialize a new child node */ node = otx2_qos_sw_create_leaf_node(pfvf, parent, classid, prio, rate, ceil, quantum, qid, static_cfg); if (IS_ERR(node)) { NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node"); ret = PTR_ERR(node); goto free_old_cfg; } /* push new txschq config to hw */ new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL); if (!new_cfg) { NL_SET_ERR_MSG_MOD(extack, "Memory allocation error"); ret = -ENOMEM; goto free_node; } ret = otx2_qos_update_tree(pfvf, node, new_cfg); if (ret) { NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error"); kfree(new_cfg); otx2_qos_sw_node_delete(pfvf, node); /* restore the old qos tree */ err = otx2_qos_txschq_update_config(pfvf, parent, old_cfg); if (err) { netdev_err(pfvf->netdev, "Failed to restore txcshq configuration"); goto free_old_cfg; } otx2_qos_update_smq(pfvf, parent, QOS_CFG_SQ); goto free_old_cfg; } /* update tx_real_queues */ otx2_qos_update_tx_netdev_queues(pfvf); /* free new txschq config */ kfree(new_cfg); /* free old txschq config */ otx2_qos_free_cfg(pfvf, old_cfg); kfree(old_cfg); return pfvf->hw.tx_queues + qid; free_node: otx2_qos_sw_node_delete(pfvf, node); free_old_cfg: kfree(old_cfg); reset_prio: if (static_cfg) parent->child_static_cnt--; else parent->child_dwrr_cnt--; clear_bit(prio, parent->prio_bmap); out: return ret; } static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid, u16 child_classid, u64 rate, u64 ceil, u64 prio, u32 quantum, struct netlink_ext_ack *extack) { struct otx2_qos_cfg *old_cfg, *new_cfg; struct otx2_qos_node *node, *child; bool static_cfg; int ret, err; u16 qid; netdev_dbg(pfvf->netdev, "TC_HTB_LEAF_TO_INNER classid %04x, child %04x, rate %llu, ceil %llu\n", classid, child_classid, rate, ceil); if (prio > OTX2_QOS_MAX_PRIO) { NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7"); ret = -EOPNOTSUPP; goto out; } if (!quantum || quantum > INT_MAX) { NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes"); ret = -EOPNOTSUPP; goto out; } /* find node related to classid */ node = otx2_sw_node_find(pfvf, classid); if (!node) { NL_SET_ERR_MSG_MOD(extack, "HTB node not found"); ret = -ENOENT; goto out; } /* check max qos txschq level */ if (node->level == NIX_TXSCH_LVL_MDQ) { NL_SET_ERR_MSG_MOD(extack, "HTB qos level not supported"); ret = -EOPNOTSUPP; goto out; } static_cfg = !is_qos_node_dwrr(node, pfvf, prio); if (!static_cfg) { ret = otx2_qos_validate_dwrr_cfg(node, extack, pfvf, prio, quantum); if (ret) goto out; } if (static_cfg) node->child_static_cnt++; else node->child_dwrr_cnt++; set_bit(prio, node->prio_bmap); /* store the qid to assign to leaf node */ qid = node->qid; /* read current txschq configuration */ old_cfg = kzalloc(sizeof(*old_cfg), GFP_KERNEL); if (!old_cfg) { NL_SET_ERR_MSG_MOD(extack, "Memory allocation error"); ret = -ENOMEM; goto reset_prio; } otx2_qos_read_txschq_cfg(pfvf, node, old_cfg); /* delete the txschq nodes allocated for this node */ otx2_qos_free_sw_node_schq(pfvf, node); /* mark this 
node as htb inner node */ WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER); /* allocate and initialize a new child node */ child = otx2_qos_sw_create_leaf_node(pfvf, node, child_classid, prio, rate, ceil, quantum, qid, static_cfg); if (IS_ERR(child)) { NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node"); ret = PTR_ERR(child); goto free_old_cfg; } /* push new txschq config to hw */ new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL); if (!new_cfg) { NL_SET_ERR_MSG_MOD(extack, "Memory allocation error"); ret = -ENOMEM; goto free_node; } ret = otx2_qos_update_tree(pfvf, child, new_cfg); if (ret) { NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error"); kfree(new_cfg); otx2_qos_sw_node_delete(pfvf, child); /* restore the old qos tree */ WRITE_ONCE(node->qid, qid); err = otx2_qos_alloc_txschq_node(pfvf, node); if (err) { netdev_err(pfvf->netdev, "Failed to restore old leaf node"); goto free_old_cfg; } err = otx2_qos_txschq_update_config(pfvf, node, old_cfg); if (err) { netdev_err(pfvf->netdev, "Failed to restore txcshq configuration"); goto free_old_cfg; } otx2_qos_update_smq(pfvf, node, QOS_CFG_SQ); goto free_old_cfg; } /* free new txschq config */ kfree(new_cfg); /* free old txschq config */ otx2_qos_free_cfg(pfvf, old_cfg); kfree(old_cfg); return 0; free_node: otx2_qos_sw_node_delete(pfvf, child); free_old_cfg: kfree(old_cfg); reset_prio: if (static_cfg) node->child_static_cnt--; else node->child_dwrr_cnt--; clear_bit(prio, node->prio_bmap); out: return ret; } static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid, struct netlink_ext_ack *extack) { struct otx2_qos_node *node, *parent; int dwrr_del_node = false; u64 prio; u16 qid; netdev_dbg(pfvf->netdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid); /* find node related to classid */ node = otx2_sw_node_find(pfvf, *classid); if (!node) { NL_SET_ERR_MSG_MOD(extack, "HTB node not found"); return -ENOENT; } parent = node->parent; prio = node->prio; qid = node->qid; if (!node->is_static) dwrr_del_node = true; otx2_qos_disable_sq(pfvf, node->qid); otx2_qos_destroy_node(pfvf, node); pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ; if (dwrr_del_node) { parent->child_dwrr_cnt--; } else { parent->child_static_cnt--; clear_bit(prio, parent->prio_bmap); } /* Reset DWRR priority if all dwrr nodes are deleted */ if (!parent->child_dwrr_cnt) otx2_reset_dwrr_prio(parent, prio); if (!parent->child_static_cnt) parent->max_static_prio = 0; return 0; } static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force, struct netlink_ext_ack *extack) { struct otx2_qos_node *node, *parent; struct otx2_qos_cfg *new_cfg; int dwrr_del_node = false; u64 prio; int err; u16 qid; netdev_dbg(pfvf->netdev, "TC_HTB_LEAF_DEL_LAST classid %04x\n", classid); /* find node related to classid */ node = otx2_sw_node_find(pfvf, classid); if (!node) { NL_SET_ERR_MSG_MOD(extack, "HTB node not found"); return -ENOENT; } /* save qid for use by parent */ qid = node->qid; prio = node->prio; parent = otx2_sw_node_find(pfvf, node->parent->classid); if (!parent) { NL_SET_ERR_MSG_MOD(extack, "parent node not found"); return -ENOENT; } if (!node->is_static) dwrr_del_node = true; /* destroy the leaf node */ otx2_qos_destroy_node(pfvf, node); pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ; if (dwrr_del_node) { parent->child_dwrr_cnt--; } else { parent->child_static_cnt--; clear_bit(prio, parent->prio_bmap); } /* Reset DWRR priority if all dwrr nodes are deleted */ if (!parent->child_dwrr_cnt) otx2_reset_dwrr_prio(parent, prio); if (!parent->child_static_cnt) 
parent->max_static_prio = 0; /* create downstream txschq entries to parent */ err = otx2_qos_alloc_txschq_node(pfvf, parent); if (err) { NL_SET_ERR_MSG_MOD(extack, "HTB failed to create txsch configuration"); return err; } WRITE_ONCE(parent->qid, qid); __set_bit(qid, pfvf->qos.qos_sq_bmap); /* push new txschq config to hw */ new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL); if (!new_cfg) { NL_SET_ERR_MSG_MOD(extack, "Memory allocation error"); return -ENOMEM; } /* fill txschq cfg and push txschq cfg to hw */ otx2_qos_fill_cfg_schq(parent, new_cfg); err = otx2_qos_push_txschq_cfg(pfvf, parent, new_cfg); if (err) { NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error"); kfree(new_cfg); return err; } kfree(new_cfg); /* update tx_real_queues */ otx2_qos_update_tx_netdev_queues(pfvf); return 0; } void otx2_clean_qos_queues(struct otx2_nic *pfvf) { struct otx2_qos_node *root; root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID); if (!root) return; otx2_qos_update_smq(pfvf, root, QOS_SMQ_FLUSH); } void otx2_qos_config_txschq(struct otx2_nic *pfvf) { struct otx2_qos_node *root; int err; root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID); if (!root) return; if (root->level != NIX_TXSCH_LVL_TL1) { err = otx2_qos_txschq_config(pfvf, root); if (err) { netdev_err(pfvf->netdev, "Error update txschq configuration\n"); goto root_destroy; } } err = otx2_qos_txschq_push_cfg_tl(pfvf, root, NULL); if (err) { netdev_err(pfvf->netdev, "Error update txschq configuration\n"); goto root_destroy; } otx2_qos_update_smq(pfvf, root, QOS_CFG_SQ); return; root_destroy: netdev_err(pfvf->netdev, "Failed to update Scheduler/Shaping config in Hardware\n"); /* Free resources allocated */ otx2_qos_root_destroy(pfvf); } int otx2_setup_tc_htb(struct net_device *ndev, struct tc_htb_qopt_offload *htb) { struct otx2_nic *pfvf = netdev_priv(ndev); int res; switch (htb->command) { case TC_HTB_CREATE: return otx2_qos_root_add(pfvf, htb->parent_classid, htb->classid, htb->extack); case TC_HTB_DESTROY: return otx2_qos_root_destroy(pfvf); case TC_HTB_LEAF_ALLOC_QUEUE: res = otx2_qos_leaf_alloc_queue(pfvf, htb->classid, htb->parent_classid, htb->rate, htb->ceil, htb->prio, htb->quantum, htb->extack); if (res < 0) return res; htb->qid = res; return 0; case TC_HTB_LEAF_TO_INNER: return otx2_qos_leaf_to_inner(pfvf, htb->parent_classid, htb->classid, htb->rate, htb->ceil, htb->prio, htb->quantum, htb->extack); case TC_HTB_LEAF_DEL: return otx2_qos_leaf_del(pfvf, &htb->classid, htb->extack); case TC_HTB_LEAF_DEL_LAST: case TC_HTB_LEAF_DEL_LAST_FORCE: return otx2_qos_leaf_del_last(pfvf, htb->classid, htb->command == TC_HTB_LEAF_DEL_LAST_FORCE, htb->extack); case TC_HTB_LEAF_QUERY_QUEUE: res = otx2_get_txq_by_classid(pfvf, htb->classid); htb->qid = res; return 0; case TC_HTB_NODE_MODIFY: fallthrough; default: return -EOPNOTSUPP; } }
linux-master
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
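otx2_qos_quantum_to_dwrr_weight() in qos.c converts an HTB class quantum in bytes into the DWRR weight written to the SCHEDULE register by rounding up to whole DWRR MTUs. Below is a minimal user-space sketch of that rounding, assuming a dwrr_mtu of 1500 bytes purely for illustration (the driver reads the real value from pfvf->hw.dwrr_mtu).

#include <stdio.h>

/* Mirrors otx2_qos_quantum_to_dwrr_weight(): weight = ceil(quantum / dwrr_mtu).
 * The dwrr_mtu used in main() is an assumption for the example only.
 */
static unsigned int quantum_to_dwrr_weight(unsigned int quantum, unsigned int dwrr_mtu)
{
	unsigned int weight = quantum / dwrr_mtu;

	if (quantum % dwrr_mtu)
		weight += 1;	/* round up any partial DWRR MTU */
	return weight;
}

int main(void)
{
	unsigned int dwrr_mtu = 1500;	/* assumed DWRR MTU for illustration */

	printf("quantum 6000 -> weight %u\n", quantum_to_dwrr_weight(6000, dwrr_mtu));
	printf("quantum 6001 -> weight %u\n", quantum_to_dwrr_weight(6001, dwrr_mtu));
	return 0;
}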
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Ethernet driver * * Copyright (C) 2021 Marvell. * */ #include "cn10k.h" #include "otx2_reg.h" #include "otx2_struct.h" static struct dev_hw_ops otx2_hw_ops = { .sq_aq_init = otx2_sq_aq_init, .sqe_flush = otx2_sqe_flush, .aura_freeptr = otx2_aura_freeptr, .refill_pool_ptrs = otx2_refill_pool_ptrs, }; static struct dev_hw_ops cn10k_hw_ops = { .sq_aq_init = cn10k_sq_aq_init, .sqe_flush = cn10k_sqe_flush, .aura_freeptr = cn10k_aura_freeptr, .refill_pool_ptrs = cn10k_refill_pool_ptrs, }; int cn10k_lmtst_init(struct otx2_nic *pfvf) { struct lmtst_tbl_setup_req *req; struct otx2_lmt_info *lmt_info; int err, cpu; if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) { pfvf->hw_ops = &otx2_hw_ops; return 0; } pfvf->hw_ops = &cn10k_hw_ops; /* Total LMTLINES = num_online_cpus() * 32 (For Burst flush).*/ pfvf->tot_lmt_lines = (num_online_cpus() * LMT_BURST_SIZE); pfvf->hw.lmt_info = alloc_percpu(struct otx2_lmt_info); mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox); if (!req) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } req->use_local_lmt_region = true; err = qmem_alloc(pfvf->dev, &pfvf->dync_lmt, pfvf->tot_lmt_lines, LMT_LINE_SIZE); if (err) { mutex_unlock(&pfvf->mbox.lock); return err; } pfvf->hw.lmt_base = (u64 *)pfvf->dync_lmt->base; req->lmt_iova = (u64)pfvf->dync_lmt->iova; err = otx2_sync_mbox_msg(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock); for_each_possible_cpu(cpu) { lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, cpu); lmt_info->lmt_addr = ((u64)pfvf->hw.lmt_base + (cpu * LMT_BURST_SIZE * LMT_LINE_SIZE)); lmt_info->lmt_id = cpu * LMT_BURST_SIZE; } return 0; } EXPORT_SYMBOL(cn10k_lmtst_init); int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) { struct nix_cn10k_aq_enq_req *aq; struct otx2_nic *pfvf = dev; /* Get memory to put this msg */ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox); if (!aq) return -ENOMEM; aq->sq.cq = pfvf->hw.rx_queues + qidx; aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */ aq->sq.cq_ena = 1; aq->sq.ena = 1; aq->sq.smq = otx2_get_smq_idx(pfvf, qidx); aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); aq->sq.default_chan = pfvf->hw.tx_chan_base; aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ aq->sq.sqb_aura = sqb_aura; aq->sq.sq_int_ena = NIX_SQINT_BITS; aq->sq.qint_idx = 0; /* Due pipelining impact minimum 2000 unused SQ CQE's * need to maintain to avoid CQ overflow. */ aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt)); /* Fill AQ info */ aq->qidx = qidx; aq->ctype = NIX_AQ_CTYPE_SQ; aq->op = NIX_AQ_INSTOP_INIT; return otx2_sync_mbox_msg(&pfvf->mbox); } #define NPA_MAX_BURST 16 int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) { struct otx2_nic *pfvf = dev; int cnt = cq->pool_ptrs; u64 ptrs[NPA_MAX_BURST]; dma_addr_t bufptr; int num_ptrs = 1; /* Refill pool with new buffers */ while (cq->pool_ptrs) { if (otx2_alloc_buffer(pfvf, cq, &bufptr)) { if (num_ptrs--) __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs, num_ptrs); break; } cq->pool_ptrs--; ptrs[num_ptrs] = (u64)bufptr + OTX2_HEAD_ROOM; num_ptrs++; if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) { __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs, num_ptrs); num_ptrs = 1; } } return cnt - cq->pool_ptrs; } void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx) { struct otx2_lmt_info *lmt_info; struct otx2_nic *pfvf = dev; u64 val = 0, tar_addr = 0; lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id()); /* FIXME: val[0:10] LMT_ID. 
* [12:15] no of LMTST - 1 in the burst. * [19:63] data size of each LMTST in the burst except first. */ val = (lmt_info->lmt_id & 0x7FF); /* Target address for LMTST flush tells HW how many 128bit * words are present. * tar_addr[6:4] size of first LMTST - 1 in units of 128b. */ tar_addr |= sq->io_addr | (((size / 16) - 1) & 0x7) << 4; dma_wmb(); memcpy((u64 *)lmt_info->lmt_addr, sq->sqe_base, size); cn10k_lmt_flush(val, tar_addr); sq->head++; sq->head &= (sq->sqe_cnt - 1); } int cn10k_free_all_ipolicers(struct otx2_nic *pfvf) { struct nix_bandprof_free_req *req; int rc; if (is_dev_otx2(pfvf->pdev)) return 0; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox); if (!req) { rc = -ENOMEM; goto out; } /* Free all bandwidth profiles allocated */ req->free_all = true; rc = otx2_sync_mbox_msg(&pfvf->mbox); out: mutex_unlock(&pfvf->mbox.lock); return rc; } int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf) { struct nix_bandprof_alloc_req *req; struct nix_bandprof_alloc_rsp *rsp; int rc; req = otx2_mbox_alloc_msg_nix_bandprof_alloc(&pfvf->mbox); if (!req) return -ENOMEM; req->prof_count[BAND_PROF_LEAF_LAYER] = 1; rc = otx2_sync_mbox_msg(&pfvf->mbox); if (rc) goto out; rsp = (struct nix_bandprof_alloc_rsp *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); if (!rsp->prof_count[BAND_PROF_LEAF_LAYER]) { rc = -EIO; goto out; } *leaf = rsp->prof_idx[BAND_PROF_LEAF_LAYER][0]; out: if (rc) { dev_warn(pfvf->dev, "Failed to allocate ingress bandwidth policer\n"); } return rc; } int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf) { struct otx2_hw *hw = &pfvf->hw; int ret; mutex_lock(&pfvf->mbox.lock); ret = cn10k_alloc_leaf_profile(pfvf, &hw->matchall_ipolicer); mutex_unlock(&pfvf->mbox.lock); return ret; } #define POLICER_TIMESTAMP 1 /* 1 second */ #define MAX_RATE_EXP 22 /* Valid rate exponent range: 0 - 22 */ static void cn10k_get_ingress_burst_cfg(u32 burst, u32 *burst_exp, u32 *burst_mantissa) { int tmp; /* Burst is calculated as * (1+[BURST_MANTISSA]/256)*2^[BURST_EXPONENT] * This is the upper limit on number tokens (bytes) that * can be accumulated in the bucket. */ *burst_exp = ilog2(burst); if (burst < 256) { /* No float: can't express mantissa in this case */ *burst_mantissa = 0; return; } if (*burst_exp > MAX_RATE_EXP) *burst_exp = MAX_RATE_EXP; /* Calculate mantissa * Find remaining bytes 'burst - 2^burst_exp' * mantissa = (remaining bytes) / 2^ (burst_exp - 8) */ tmp = burst - rounddown_pow_of_two(burst); *burst_mantissa = tmp / (1UL << (*burst_exp - 8)); } static void cn10k_get_ingress_rate_cfg(u64 rate, u32 *rate_exp, u32 *rate_mantissa, u32 *rdiv) { u32 div = 0; u32 exp = 0; u64 tmp; /* Figure out mantissa, exponent and divider from given max pkt rate * * To achieve desired rate HW adds * (1+[RATE_MANTISSA]/256)*2^[RATE_EXPONENT] tokens (bytes) at every * policer timeunit * 2^rdiv ie 2 * 2^rdiv usecs, to the token bucket. * Here policer timeunit is 2 usecs and rate is in bits per sec. * Since floating point cannot be used below algorithm uses 1000000 * scale factor to support rates upto 100Gbps. 
*/ tmp = rate * 32 * 2; if (tmp < 256000000) { while (tmp < 256000000) { tmp = tmp * 2; div++; } } else { for (exp = 0; tmp >= 512000000 && exp <= MAX_RATE_EXP; exp++) tmp = tmp / 2; if (exp > MAX_RATE_EXP) exp = MAX_RATE_EXP; } *rate_mantissa = (tmp - 256000000) / 1000000; *rate_exp = exp; *rdiv = div; } int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx, u16 policer, bool map) { struct nix_cn10k_aq_enq_req *aq; aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox); if (!aq) return -ENOMEM; /* Enable policing and set the bandwidth profile (policer) index */ if (map) aq->rq.policer_ena = 1; else aq->rq.policer_ena = 0; aq->rq_mask.policer_ena = 1; aq->rq.band_prof_id = policer; aq->rq_mask.band_prof_id = GENMASK(9, 0); /* Fill AQ info */ aq->qidx = rq_idx; aq->ctype = NIX_AQ_CTYPE_RQ; aq->op = NIX_AQ_INSTOP_WRITE; return otx2_sync_mbox_msg(&pfvf->mbox); } int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf) { struct nix_bandprof_free_req *req; req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox); if (!req) return -ENOMEM; req->prof_count[BAND_PROF_LEAF_LAYER] = 1; req->prof_idx[BAND_PROF_LEAF_LAYER][0] = leaf; return otx2_sync_mbox_msg(&pfvf->mbox); } int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf) { struct otx2_hw *hw = &pfvf->hw; int qidx, rc; mutex_lock(&pfvf->mbox.lock); /* Remove RQ's policer mapping */ for (qidx = 0; qidx < hw->rx_queues; qidx++) cn10k_map_unmap_rq_policer(pfvf, qidx, hw->matchall_ipolicer, false); rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer); mutex_unlock(&pfvf->mbox.lock); return rc; } int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile, u32 burst, u64 rate, bool pps) { struct nix_cn10k_aq_enq_req *aq; u32 burst_exp, burst_mantissa; u32 rate_exp, rate_mantissa; u32 rdiv; /* Get exponent and mantissa values for the desired rate */ cn10k_get_ingress_burst_cfg(burst, &burst_exp, &burst_mantissa); cn10k_get_ingress_rate_cfg(rate, &rate_exp, &rate_mantissa, &rdiv); /* Init bandwidth profile */ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox); if (!aq) return -ENOMEM; /* Set initial color mode to blind */ aq->prof.icolor = 0x03; aq->prof_mask.icolor = 0x03; /* Set rate and burst values */ aq->prof.cir_exponent = rate_exp; aq->prof_mask.cir_exponent = 0x1F; aq->prof.cir_mantissa = rate_mantissa; aq->prof_mask.cir_mantissa = 0xFF; aq->prof.cbs_exponent = burst_exp; aq->prof_mask.cbs_exponent = 0x1F; aq->prof.cbs_mantissa = burst_mantissa; aq->prof_mask.cbs_mantissa = 0xFF; aq->prof.rdiv = rdiv; aq->prof_mask.rdiv = 0xF; if (pps) { /* The amount of decremented tokens is calculated according to * the following equation: * max([ LMODE ? 0 : (packet_length - LXPTR)] + * ([ADJUST_MANTISSA]/256 - 1) * 2^[ADJUST_EXPONENT], * 1/256) * if LMODE is 1 then rate limiting will be based on * PPS otherwise bps. * The aim of the ADJUST value is to specify a token cost per * packet in contrary to the packet length that specifies a * cost per byte. To rate limit based on PPS adjust mantissa * is set as 384 and exponent as 1 so that number of tokens * decremented becomes 1 i.e, 1 token per packeet. 
*/ aq->prof.adjust_exponent = 1; aq->prof_mask.adjust_exponent = 0x1F; aq->prof.adjust_mantissa = 384; aq->prof_mask.adjust_mantissa = 0x1FF; aq->prof.lmode = 0x1; aq->prof_mask.lmode = 0x1; } /* Two rate three color marker * With PEIR/EIR set to zero, color will be either green or red */ aq->prof.meter_algo = 2; aq->prof_mask.meter_algo = 0x3; aq->prof.rc_action = NIX_RX_BAND_PROF_ACTIONRESULT_DROP; aq->prof_mask.rc_action = 0x3; aq->prof.yc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS; aq->prof_mask.yc_action = 0x3; aq->prof.gc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS; aq->prof_mask.gc_action = 0x3; /* Setting exponent value as 24 and mantissa as 0 configures * the bucket with zero values making bucket unused. Peak * information rate and Excess information rate buckets are * unused here. */ aq->prof.peir_exponent = 24; aq->prof_mask.peir_exponent = 0x1F; aq->prof.peir_mantissa = 0; aq->prof_mask.peir_mantissa = 0xFF; aq->prof.pebs_exponent = 24; aq->prof_mask.pebs_exponent = 0x1F; aq->prof.pebs_mantissa = 0; aq->prof_mask.pebs_mantissa = 0xFF; /* Fill AQ info */ aq->qidx = profile; aq->ctype = NIX_AQ_CTYPE_BANDPROF; aq->op = NIX_AQ_INSTOP_WRITE; return otx2_sync_mbox_msg(&pfvf->mbox); } int cn10k_set_matchall_ipolicer_rate(struct otx2_nic *pfvf, u32 burst, u64 rate) { struct otx2_hw *hw = &pfvf->hw; int qidx, rc; mutex_lock(&pfvf->mbox.lock); rc = cn10k_set_ipolicer_rate(pfvf, hw->matchall_ipolicer, burst, rate, false); if (rc) goto out; for (qidx = 0; qidx < hw->rx_queues; qidx++) { rc = cn10k_map_unmap_rq_policer(pfvf, qidx, hw->matchall_ipolicer, true); if (rc) break; } out: mutex_unlock(&pfvf->mbox.lock); return rc; }
linux-master
drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
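cn10k_get_ingress_burst_cfg() in cn10k.c encodes the ingress policer burst as (1 + mantissa/256) * 2^exponent. The sketch below reproduces that encoding in user space so a sample value can be checked by hand; ilog2() and rounddown_pow_of_two() are kernel helpers, so simple stand-ins are used here, and MAX_RATE_EXP mirrors the driver's limit of 22. This is an illustration of the math under those assumptions, not driver code.

#include <stdio.h>
#include <stdint.h>

#define MAX_RATE_EXP 22	/* mirrors the driver's valid exponent range 0-22 */

/* Stand-in for the kernel's ilog2() for 32-bit values. */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Mirrors cn10k_get_ingress_burst_cfg(): burst ~= (1 + mantissa/256) * 2^exp */
static void get_ingress_burst_cfg(uint32_t burst, uint32_t *exp, uint32_t *mantissa)
{
	uint32_t tmp;

	*exp = ilog2_u32(burst);
	if (burst < 256) {
		*mantissa = 0;	/* mantissa cannot be expressed below 256 bytes */
		return;
	}
	if (*exp > MAX_RATE_EXP)
		*exp = MAX_RATE_EXP;
	tmp = burst - (1U << ilog2_u32(burst));	/* bytes above the largest power of two <= burst */
	*mantissa = tmp / (1U << (*exp - 8));	/* scaled into 0..255 */
}

int main(void)
{
	uint32_t exp, mantissa, burst = 3000;	/* example burst in bytes */

	get_ingress_burst_cfg(burst, &exp, &mantissa);
	/* For 3000: exp = 11, mantissa = 119, and (256 + 119) * 2048 / 256 = 3000 */
	printf("burst %u -> exponent %u, mantissa %u\n", burst, exp, mantissa);
	return 0;
}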
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Ethernet driver * * Copyright (C) 2021 Marvell. * */ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/inetdevice.h> #include <linux/rhashtable.h> #include <linux/bitfield.h> #include <net/flow_dissector.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_gact.h> #include <net/tc_act/tc_mirred.h> #include <net/tc_act/tc_vlan.h> #include <net/ipv6.h> #include "cn10k.h" #include "otx2_common.h" #include "qos.h" #define CN10K_MAX_BURST_MANTISSA 0x7FFFULL #define CN10K_MAX_BURST_SIZE 8453888ULL #define CN10K_TLX_BURST_MANTISSA GENMASK_ULL(43, 29) #define CN10K_TLX_BURST_EXPONENT GENMASK_ULL(47, 44) struct otx2_tc_flow_stats { u64 bytes; u64 pkts; u64 used; }; struct otx2_tc_flow { struct list_head list; unsigned long cookie; struct rcu_head rcu; struct otx2_tc_flow_stats stats; spinlock_t lock; /* lock for stats */ u16 rq; u16 entry; u16 leaf_profile; bool is_act_police; u32 prio; struct npc_install_flow_req req; }; static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst, u32 *burst_exp, u32 *burst_mantissa) { int max_burst, max_mantissa; unsigned int tmp; if (is_dev_otx2(nic->pdev)) { max_burst = MAX_BURST_SIZE; max_mantissa = MAX_BURST_MANTISSA; } else { max_burst = CN10K_MAX_BURST_SIZE; max_mantissa = CN10K_MAX_BURST_MANTISSA; } /* Burst is calculated as * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256 * Max supported burst size is 130,816 bytes. */ burst = min_t(u32, burst, max_burst); if (burst) { *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0; tmp = burst - rounddown_pow_of_two(burst); if (burst < max_mantissa) *burst_mantissa = tmp * 2; else *burst_mantissa = tmp / (1ULL << (*burst_exp - 7)); } else { *burst_exp = MAX_BURST_EXPONENT; *burst_mantissa = max_mantissa; } } static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp, u32 *mantissa, u32 *div_exp) { u64 tmp; /* Rate calculation by hardware * * PIR_ADD = ((256 + mantissa) << exp) / 256 * rate = (2 * PIR_ADD) / ( 1 << div_exp) * The resultant rate is in Mbps. */ /* 2Mbps to 100Gbps can be expressed with div_exp = 0. * Setting this to '0' will ease the calculation of * exponent and mantissa. */ *div_exp = 0; if (maxrate) { *exp = ilog2(maxrate) ? 
ilog2(maxrate) - 1 : 0; tmp = maxrate - rounddown_pow_of_two(maxrate); if (maxrate < MAX_RATE_MANTISSA) *mantissa = tmp * 2; else *mantissa = tmp / (1ULL << (*exp - 7)); } else { /* Instead of disabling rate limiting, set all values to max */ *exp = MAX_RATE_EXPONENT; *mantissa = MAX_RATE_MANTISSA; } } u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic, u64 maxrate, u32 burst) { u32 burst_exp, burst_mantissa; u32 exp, mantissa, div_exp; u64 regval = 0; /* Get exponent and mantissa values from the desired rate */ otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa); otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp); if (is_dev_otx2(nic->pdev)) { regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) | FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) | FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | FIELD_PREP(TLX_RATE_EXPONENT, exp) | FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); } else { regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) | FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) | FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | FIELD_PREP(TLX_RATE_EXPONENT, exp) | FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); } return regval; } static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u64 maxrate) { struct otx2_hw *hw = &nic->hw; struct nix_txschq_config *req; int txschq, err; /* All SQs share the same TL4, so pick the first scheduler */ txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0]; mutex_lock(&nic->mbox.lock); req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox); if (!req) { mutex_unlock(&nic->mbox.lock); return -ENOMEM; } req->lvl = NIX_TXSCH_LVL_TL4; req->num_regs = 1; req->reg[0] = NIX_AF_TL4X_PIR(txschq); req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst); err = otx2_sync_mbox_msg(&nic->mbox); mutex_unlock(&nic->mbox.lock); return err; } static int otx2_tc_validate_flow(struct otx2_nic *nic, struct flow_action *actions, struct netlink_ext_ack *extack) { if (nic->flags & OTX2_FLAG_INTF_DOWN) { NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); return -EINVAL; } if (!flow_action_has_entries(actions)) { NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action"); return -EINVAL; } if (!flow_offload_has_one_action(actions)) { NL_SET_ERR_MSG_MOD(extack, "Egress MATCHALL offload supports only 1 policing action"); return -EINVAL; } return 0; } static int otx2_policer_validate(const struct flow_action *action, const struct flow_action_entry *act, struct netlink_ext_ack *extack) { if (act->police.exceed.act_id != FLOW_ACTION_DROP) { NL_SET_ERR_MSG_MOD(extack, "Offload not supported when exceed action is not drop"); return -EOPNOTSUPP; } if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform action is not pipe or ok"); return -EOPNOTSUPP; } if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && !flow_action_is_last_entry(action, act)) { NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform action is ok, but action is not last"); return -EOPNOTSUPP; } if (act->police.peakrate_bytes_ps || act->police.avrate || act->police.overhead) { NL_SET_ERR_MSG_MOD(extack, "Offload not supported when peakrate/avrate/overhead is configured"); return -EOPNOTSUPP; } return 0; } static int otx2_tc_egress_matchall_install(struct otx2_nic *nic, struct tc_cls_matchall_offload *cls) { struct netlink_ext_ack *extack = cls->common.extack; struct flow_action *actions = 
&cls->rule->action; struct flow_action_entry *entry; int err; err = otx2_tc_validate_flow(nic, actions, extack); if (err) return err; if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) { NL_SET_ERR_MSG_MOD(extack, "Only one Egress MATCHALL ratelimiter can be offloaded"); return -ENOMEM; } entry = &cls->rule->action.entries[0]; switch (entry->id) { case FLOW_ACTION_POLICE: err = otx2_policer_validate(&cls->rule->action, entry, extack); if (err) return err; if (entry->police.rate_pkt_ps) { NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second"); return -EOPNOTSUPP; } err = otx2_set_matchall_egress_rate(nic, entry->police.burst, otx2_convert_rate(entry->police.rate_bytes_ps)); if (err) return err; nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; break; default: NL_SET_ERR_MSG_MOD(extack, "Only police action is supported with Egress MATCHALL offload"); return -EOPNOTSUPP; } return 0; } static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic, struct tc_cls_matchall_offload *cls) { struct netlink_ext_ack *extack = cls->common.extack; int err; if (nic->flags & OTX2_FLAG_INTF_DOWN) { NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); return -EINVAL; } err = otx2_set_matchall_egress_rate(nic, 0, 0); nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; return err; } static int otx2_tc_act_set_police(struct otx2_nic *nic, struct otx2_tc_flow *node, struct flow_cls_offload *f, u64 rate, u32 burst, u32 mark, struct npc_install_flow_req *req, bool pps) { struct netlink_ext_ack *extack = f->common.extack; struct otx2_hw *hw = &nic->hw; int rq_idx, rc; rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues); if (rq_idx >= hw->rx_queues) { NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded"); return -EINVAL; } mutex_lock(&nic->mbox.lock); rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile); if (rc) { mutex_unlock(&nic->mbox.lock); return rc; } rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps); if (rc) goto free_leaf; rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true); if (rc) goto free_leaf; mutex_unlock(&nic->mbox.lock); req->match_id = mark & 0xFFFFULL; req->index = rq_idx; req->op = NIX_RX_ACTIONOP_UCAST; set_bit(rq_idx, &nic->rq_bmap); node->is_act_police = true; node->rq = rq_idx; return 0; free_leaf: if (cn10k_free_leaf_profile(nic, node->leaf_profile)) netdev_err(nic->netdev, "Unable to free leaf bandwidth profile(%d)\n", node->leaf_profile); mutex_unlock(&nic->mbox.lock); return rc; } static int otx2_tc_parse_actions(struct otx2_nic *nic, struct flow_action *flow_action, struct npc_install_flow_req *req, struct flow_cls_offload *f, struct otx2_tc_flow *node) { struct netlink_ext_ack *extack = f->common.extack; struct flow_action_entry *act; struct net_device *target; struct otx2_nic *priv; u32 burst, mark = 0; u8 nr_police = 0; bool pps = false; u64 rate; int err; int i; if (!flow_action_has_entries(flow_action)) { NL_SET_ERR_MSG_MOD(extack, "no tc actions specified"); return -EINVAL; } flow_action_for_each(i, act, flow_action) { switch (act->id) { case FLOW_ACTION_DROP: req->op = NIX_RX_ACTIONOP_DROP; return 0; case FLOW_ACTION_ACCEPT: req->op = NIX_RX_ACTION_DEFAULT; return 0; case FLOW_ACTION_REDIRECT_INGRESS: target = act->dev; priv = netdev_priv(target); /* npc_install_flow_req doesn't support passing a target pcifunc */ if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) { NL_SET_ERR_MSG_MOD(extack, "can't redirect to other pf/vf"); return -EOPNOTSUPP; } req->vf = priv->pcifunc & 
RVU_PFVF_FUNC_MASK; /* if op is already set; avoid overwriting the same */ if (!req->op) req->op = NIX_RX_ACTION_DEFAULT; break; case FLOW_ACTION_VLAN_POP: req->vtag0_valid = true; /* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */ req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7; break; case FLOW_ACTION_POLICE: /* Ingress ratelimiting is not supported on OcteonTx2 */ if (is_dev_otx2(nic->pdev)) { NL_SET_ERR_MSG_MOD(extack, "Ingress policing not supported on this platform"); return -EOPNOTSUPP; } err = otx2_policer_validate(flow_action, act, extack); if (err) return err; if (act->police.rate_bytes_ps > 0) { rate = act->police.rate_bytes_ps * 8; burst = act->police.burst; } else if (act->police.rate_pkt_ps > 0) { /* The algorithm used to calculate rate * mantissa, exponent values for a given token * rate (token can be byte or packet) requires * token rate to be mutiplied by 8. */ rate = act->police.rate_pkt_ps * 8; burst = act->police.burst_pkt; pps = true; } nr_police++; break; case FLOW_ACTION_MARK: mark = act->mark; break; case FLOW_ACTION_RX_QUEUE_MAPPING: req->op = NIX_RX_ACTIONOP_UCAST; req->index = act->rx_queue; break; default: return -EOPNOTSUPP; } } if (nr_police > 1) { NL_SET_ERR_MSG_MOD(extack, "rate limit police offload requires a single action"); return -EOPNOTSUPP; } if (nr_police) return otx2_tc_act_set_police(nic, node, f, rate, burst, mark, req, pps); return 0; } static int otx2_tc_process_vlan(struct otx2_nic *nic, struct flow_msg *flow_spec, struct flow_msg *flow_mask, struct flow_rule *rule, struct npc_install_flow_req *req, bool is_inner) { struct flow_match_vlan match; u16 vlan_tci, vlan_tci_mask; if (is_inner) flow_rule_match_cvlan(rule, &match); else flow_rule_match_vlan(rule, &match); if (!eth_type_vlan(match.key->vlan_tpid)) { netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n", ntohs(match.key->vlan_tpid)); return -EOPNOTSUPP; } if (!match.mask->vlan_id) { struct flow_action_entry *act; int i; flow_action_for_each(i, act, &rule->action) { if (act->id == FLOW_ACTION_DROP) { netdev_err(nic->netdev, "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n", ntohs(match.key->vlan_tpid), match.key->vlan_id); return -EOPNOTSUPP; } } } if (match.mask->vlan_id || match.mask->vlan_dei || match.mask->vlan_priority) { vlan_tci = match.key->vlan_id | match.key->vlan_dei << 12 | match.key->vlan_priority << 13; vlan_tci_mask = match.mask->vlan_id | match.mask->vlan_dei << 12 | match.mask->vlan_priority << 13; if (is_inner) { flow_spec->vlan_itci = htons(vlan_tci); flow_mask->vlan_itci = htons(vlan_tci_mask); req->features |= BIT_ULL(NPC_INNER_VID); } else { flow_spec->vlan_tci = htons(vlan_tci); flow_mask->vlan_tci = htons(vlan_tci_mask); req->features |= BIT_ULL(NPC_OUTER_VID); } } return 0; } static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, struct flow_cls_offload *f, struct npc_install_flow_req *req) { struct netlink_ext_ack *extack = f->common.extack; struct flow_msg *flow_spec = &req->packet; struct flow_msg *flow_mask = &req->mask; struct flow_dissector *dissector; struct flow_rule *rule; u8 ip_proto = 0; rule = flow_cls_offload_flow_rule(f); dissector = rule->match.dissector; if ((dissector->used_keys & ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | BIT(FLOW_DISSECTOR_KEY_CVLAN) | BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | 
BIT(FLOW_DISSECTOR_KEY_IPSEC) | BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) { netdev_info(nic->netdev, "unsupported flow used key 0x%llx", dissector->used_keys); return -EOPNOTSUPP; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; flow_rule_match_basic(rule, &match); /* All EtherTypes can be matched, no hw limitation */ flow_spec->etype = match.key->n_proto; flow_mask->etype = match.mask->n_proto; req->features |= BIT_ULL(NPC_ETYPE); if (match.mask->ip_proto && (match.key->ip_proto != IPPROTO_TCP && match.key->ip_proto != IPPROTO_UDP && match.key->ip_proto != IPPROTO_SCTP && match.key->ip_proto != IPPROTO_ICMP && match.key->ip_proto != IPPROTO_ESP && match.key->ip_proto != IPPROTO_AH && match.key->ip_proto != IPPROTO_ICMPV6)) { netdev_info(nic->netdev, "ip_proto=0x%x not supported\n", match.key->ip_proto); return -EOPNOTSUPP; } if (match.mask->ip_proto) ip_proto = match.key->ip_proto; if (ip_proto == IPPROTO_UDP) req->features |= BIT_ULL(NPC_IPPROTO_UDP); else if (ip_proto == IPPROTO_TCP) req->features |= BIT_ULL(NPC_IPPROTO_TCP); else if (ip_proto == IPPROTO_SCTP) req->features |= BIT_ULL(NPC_IPPROTO_SCTP); else if (ip_proto == IPPROTO_ICMP) req->features |= BIT_ULL(NPC_IPPROTO_ICMP); else if (ip_proto == IPPROTO_ICMPV6) req->features |= BIT_ULL(NPC_IPPROTO_ICMP6); else if (ip_proto == IPPROTO_ESP) req->features |= BIT_ULL(NPC_IPPROTO_ESP); else if (ip_proto == IPPROTO_AH) req->features |= BIT_ULL(NPC_IPPROTO_AH); } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { struct flow_match_control match; flow_rule_match_control(rule, &match); if (match.mask->flags & FLOW_DIS_FIRST_FRAG) { NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later"); return -EOPNOTSUPP; } if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) { if (ntohs(flow_spec->etype) == ETH_P_IP) { flow_spec->ip_flag = IPV4_FLAG_MORE; flow_mask->ip_flag = IPV4_FLAG_MORE; req->features |= BIT_ULL(NPC_IPFRAG_IPV4); } else if (ntohs(flow_spec->etype) == ETH_P_IPV6) { flow_spec->next_header = IPPROTO_FRAGMENT; flow_mask->next_header = 0xff; req->features |= BIT_ULL(NPC_IPFRAG_IPV6); } else { NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 and IPv6"); return -EOPNOTSUPP; } } } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { struct flow_match_eth_addrs match; flow_rule_match_eth_addrs(rule, &match); if (!is_zero_ether_addr(match.mask->src)) { NL_SET_ERR_MSG_MOD(extack, "src mac match not supported"); return -EOPNOTSUPP; } if (!is_zero_ether_addr(match.mask->dst)) { ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst); ether_addr_copy(flow_mask->dmac, (u8 *)&match.mask->dst); req->features |= BIT_ULL(NPC_DMAC); } } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPSEC)) { struct flow_match_ipsec match; flow_rule_match_ipsec(rule, &match); if (!match.mask->spi) { NL_SET_ERR_MSG_MOD(extack, "spi index not specified"); return -EOPNOTSUPP; } if (ip_proto != IPPROTO_ESP && ip_proto != IPPROTO_AH) { NL_SET_ERR_MSG_MOD(extack, "SPI index is valid only for ESP/AH proto"); return -EOPNOTSUPP; } flow_spec->spi = match.key->spi; flow_mask->spi = match.mask->spi; req->features |= BIT_ULL(NPC_IPSEC_SPI); } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { struct flow_match_ip match; flow_rule_match_ip(rule, &match); if ((ntohs(flow_spec->etype) != ETH_P_IP) && match.mask->tos) { NL_SET_ERR_MSG_MOD(extack, "tos not supported"); return -EOPNOTSUPP; } if (match.mask->ttl) { NL_SET_ERR_MSG_MOD(extack, "ttl not supported"); return -EOPNOTSUPP; } flow_spec->tos = 
match.key->tos; flow_mask->tos = match.mask->tos; req->features |= BIT_ULL(NPC_TOS); } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { int ret; ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, false); if (ret) return ret; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { int ret; ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, true); if (ret) return ret; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { struct flow_match_ipv4_addrs match; flow_rule_match_ipv4_addrs(rule, &match); flow_spec->ip4dst = match.key->dst; flow_mask->ip4dst = match.mask->dst; req->features |= BIT_ULL(NPC_DIP_IPV4); flow_spec->ip4src = match.key->src; flow_mask->ip4src = match.mask->src; req->features |= BIT_ULL(NPC_SIP_IPV4); } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { struct flow_match_ipv6_addrs match; flow_rule_match_ipv6_addrs(rule, &match); if (ipv6_addr_loopback(&match.key->dst) || ipv6_addr_loopback(&match.key->src)) { NL_SET_ERR_MSG_MOD(extack, "Flow matching IPv6 loopback addr not supported"); return -EOPNOTSUPP; } if (!ipv6_addr_any(&match.mask->dst)) { memcpy(&flow_spec->ip6dst, (struct in6_addr *)&match.key->dst, sizeof(flow_spec->ip6dst)); memcpy(&flow_mask->ip6dst, (struct in6_addr *)&match.mask->dst, sizeof(flow_spec->ip6dst)); req->features |= BIT_ULL(NPC_DIP_IPV6); } if (!ipv6_addr_any(&match.mask->src)) { memcpy(&flow_spec->ip6src, (struct in6_addr *)&match.key->src, sizeof(flow_spec->ip6src)); memcpy(&flow_mask->ip6src, (struct in6_addr *)&match.mask->src, sizeof(flow_spec->ip6src)); req->features |= BIT_ULL(NPC_SIP_IPV6); } } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { struct flow_match_ports match; flow_rule_match_ports(rule, &match); flow_spec->dport = match.key->dst; flow_mask->dport = match.mask->dst; if (flow_mask->dport) { if (ip_proto == IPPROTO_UDP) req->features |= BIT_ULL(NPC_DPORT_UDP); else if (ip_proto == IPPROTO_TCP) req->features |= BIT_ULL(NPC_DPORT_TCP); else if (ip_proto == IPPROTO_SCTP) req->features |= BIT_ULL(NPC_DPORT_SCTP); } flow_spec->sport = match.key->src; flow_mask->sport = match.mask->src; if (flow_mask->sport) { if (ip_proto == IPPROTO_UDP) req->features |= BIT_ULL(NPC_SPORT_UDP); else if (ip_proto == IPPROTO_TCP) req->features |= BIT_ULL(NPC_SPORT_TCP); else if (ip_proto == IPPROTO_SCTP) req->features |= BIT_ULL(NPC_SPORT_SCTP); } } return otx2_tc_parse_actions(nic, &rule->action, req, f, node); } static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct otx2_tc_flow *iter, *tmp; if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC)) return; list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) { list_del(&iter->list); kfree(iter); flow_cfg->nr_flows--; } } static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg, unsigned long cookie) { struct otx2_tc_flow *tmp; list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) { if (tmp->cookie == cookie) return tmp; } return NULL; } static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg, int index) { struct otx2_tc_flow *tmp; int i = 0; list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) { if (i == index) return tmp; i++; } return NULL; } static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg, struct otx2_tc_flow *node) { struct list_head *pos, *n; struct otx2_tc_flow *tmp; list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { tmp = list_entry(pos, 
struct otx2_tc_flow, list); if (node == tmp) { list_del(&node->list); return; } } } static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg, struct otx2_tc_flow *node) { struct list_head *pos, *n; struct otx2_tc_flow *tmp; int index = 0; /* If the flow list is empty then add the new node */ if (list_empty(&flow_cfg->flow_list_tc)) { list_add(&node->list, &flow_cfg->flow_list_tc); return index; } list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { tmp = list_entry(pos, struct otx2_tc_flow, list); if (node->prio < tmp->prio) break; index++; } list_add(&node->list, pos->prev); return index; } static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req) { struct npc_install_flow_req *tmp_req; int err; mutex_lock(&nic->mbox.lock); tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); if (!tmp_req) { mutex_unlock(&nic->mbox.lock); return -ENOMEM; } memcpy(tmp_req, req, sizeof(struct npc_install_flow_req)); /* Send message to AF */ err = otx2_sync_mbox_msg(&nic->mbox); if (err) { netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n", req->entry); mutex_unlock(&nic->mbox.lock); return -EFAULT; } mutex_unlock(&nic->mbox.lock); return 0; } static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val) { struct npc_delete_flow_rsp *rsp; struct npc_delete_flow_req *req; int err; mutex_lock(&nic->mbox.lock); req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox); if (!req) { mutex_unlock(&nic->mbox.lock); return -ENOMEM; } req->entry = entry; /* Send message to AF */ err = otx2_sync_mbox_msg(&nic->mbox); if (err) { netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n", entry); mutex_unlock(&nic->mbox.lock); return -EFAULT; } if (cntr_val) { rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox, 0, &req->hdr); if (IS_ERR(rsp)) { netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n", entry); mutex_unlock(&nic->mbox.lock); return -EFAULT; } *cntr_val = rsp->cntr_val; } mutex_unlock(&nic->mbox.lock); return 0; } static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic, struct otx2_flow_config *flow_cfg, struct otx2_tc_flow *node) { struct list_head *pos, *n; struct otx2_tc_flow *tmp; int i = 0, index = 0; u16 cntr_val = 0; /* Find and delete the entry from the list and re-install * all the entries from beginning to the index of the * deleted entry to higher mcam indexes. */ list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { tmp = list_entry(pos, struct otx2_tc_flow, list); if (node == tmp) { list_del(&tmp->list); break; } otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val); tmp->entry++; tmp->req.entry = tmp->entry; tmp->req.cntr_val = cntr_val; index++; } list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { if (i == index) break; tmp = list_entry(pos, struct otx2_tc_flow, list); otx2_add_mcam_flow_entry(nic, &tmp->req); i++; } return 0; } static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic, struct otx2_flow_config *flow_cfg, struct otx2_tc_flow *node) { int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1; struct otx2_tc_flow *tmp; int list_idx, i; u16 cntr_val = 0; /* Find the index of the entry(list_idx) whose priority * is greater than the new entry and re-install all * the entries from beginning to list_idx to higher * mcam indexes. 
*/ list_idx = otx2_tc_add_to_flow_list(flow_cfg, node); for (i = 0; i < list_idx; i++) { tmp = otx2_tc_get_entry_by_index(flow_cfg, i); if (!tmp) return -ENOMEM; otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val); tmp->entry = flow_cfg->flow_ent[mcam_idx]; tmp->req.entry = tmp->entry; tmp->req.cntr_val = cntr_val; otx2_add_mcam_flow_entry(nic, &tmp->req); mcam_idx++; } return mcam_idx; } static int otx2_tc_update_mcam_table(struct otx2_nic *nic, struct otx2_flow_config *flow_cfg, struct otx2_tc_flow *node, bool add_req) { if (add_req) return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node); return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node); } static int otx2_tc_del_flow(struct otx2_nic *nic, struct flow_cls_offload *tc_flow_cmd) { struct otx2_flow_config *flow_cfg = nic->flow_cfg; struct otx2_tc_flow *flow_node; int err; flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie); if (!flow_node) { netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n", tc_flow_cmd->cookie); return -EINVAL; } if (flow_node->is_act_police) { mutex_lock(&nic->mbox.lock); err = cn10k_map_unmap_rq_policer(nic, flow_node->rq, flow_node->leaf_profile, false); if (err) netdev_err(nic->netdev, "Unmapping RQ %d & profile %d failed\n", flow_node->rq, flow_node->leaf_profile); err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile); if (err) netdev_err(nic->netdev, "Unable to free leaf bandwidth profile(%d)\n", flow_node->leaf_profile); __clear_bit(flow_node->rq, &nic->rq_bmap); mutex_unlock(&nic->mbox.lock); } otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL); otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false); kfree_rcu(flow_node, rcu); flow_cfg->nr_flows--; return 0; } static int otx2_tc_add_flow(struct otx2_nic *nic, struct flow_cls_offload *tc_flow_cmd) { struct netlink_ext_ack *extack = tc_flow_cmd->common.extack; struct otx2_flow_config *flow_cfg = nic->flow_cfg; struct otx2_tc_flow *new_node, *old_node; struct npc_install_flow_req *req, dummy; int rc, err, mcam_idx; if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) return -ENOMEM; if (flow_cfg->nr_flows == flow_cfg->max_flows) { NL_SET_ERR_MSG_MOD(extack, "Free MCAM entry not available to add the flow"); return -ENOMEM; } /* allocate memory for the new flow and it's node */ new_node = kzalloc(sizeof(*new_node), GFP_KERNEL); if (!new_node) return -ENOMEM; spin_lock_init(&new_node->lock); new_node->cookie = tc_flow_cmd->cookie; new_node->prio = tc_flow_cmd->common.prio; memset(&dummy, 0, sizeof(struct npc_install_flow_req)); rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy); if (rc) { kfree_rcu(new_node, rcu); return rc; } /* If a flow exists with the same cookie, delete it */ old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie); if (old_node) otx2_tc_del_flow(nic, tc_flow_cmd); mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true); mutex_lock(&nic->mbox.lock); req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); if (!req) { mutex_unlock(&nic->mbox.lock); rc = -ENOMEM; goto free_leaf; } memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr)); memcpy(req, &dummy, sizeof(struct npc_install_flow_req)); req->channel = nic->hw.rx_chan_base; req->entry = flow_cfg->flow_ent[mcam_idx]; req->intf = NIX_INTF_RX; req->set_cntr = 1; new_node->entry = req->entry; /* Send message to AF */ rc = otx2_sync_mbox_msg(&nic->mbox); if (rc) { NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry"); mutex_unlock(&nic->mbox.lock); goto free_leaf; } 
mutex_unlock(&nic->mbox.lock); memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req)); flow_cfg->nr_flows++; return 0; free_leaf: otx2_tc_del_from_flow_list(flow_cfg, new_node); kfree_rcu(new_node, rcu); if (new_node->is_act_police) { mutex_lock(&nic->mbox.lock); err = cn10k_map_unmap_rq_policer(nic, new_node->rq, new_node->leaf_profile, false); if (err) netdev_err(nic->netdev, "Unmapping RQ %d & profile %d failed\n", new_node->rq, new_node->leaf_profile); err = cn10k_free_leaf_profile(nic, new_node->leaf_profile); if (err) netdev_err(nic->netdev, "Unable to free leaf bandwidth profile(%d)\n", new_node->leaf_profile); __clear_bit(new_node->rq, &nic->rq_bmap); mutex_unlock(&nic->mbox.lock); } return rc; } static int otx2_tc_get_flow_stats(struct otx2_nic *nic, struct flow_cls_offload *tc_flow_cmd) { struct npc_mcam_get_stats_req *req; struct npc_mcam_get_stats_rsp *rsp; struct otx2_tc_flow_stats *stats; struct otx2_tc_flow *flow_node; int err; flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie); if (!flow_node) { netdev_info(nic->netdev, "tc flow not found for cookie %lx", tc_flow_cmd->cookie); return -EINVAL; } mutex_lock(&nic->mbox.lock); req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox); if (!req) { mutex_unlock(&nic->mbox.lock); return -ENOMEM; } req->entry = flow_node->entry; err = otx2_sync_mbox_msg(&nic->mbox); if (err) { netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n", req->entry); mutex_unlock(&nic->mbox.lock); return -EFAULT; } rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp (&nic->mbox.mbox, 0, &req->hdr); if (IS_ERR(rsp)) { mutex_unlock(&nic->mbox.lock); return PTR_ERR(rsp); } mutex_unlock(&nic->mbox.lock); if (!rsp->stat_ena) return -EINVAL; stats = &flow_node->stats; spin_lock(&flow_node->lock); flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0, FLOW_ACTION_HW_STATS_IMMEDIATE); stats->pkts = rsp->stat; spin_unlock(&flow_node->lock); return 0; } static int otx2_setup_tc_cls_flower(struct otx2_nic *nic, struct flow_cls_offload *cls_flower) { switch (cls_flower->command) { case FLOW_CLS_REPLACE: return otx2_tc_add_flow(nic, cls_flower); case FLOW_CLS_DESTROY: return otx2_tc_del_flow(nic, cls_flower); case FLOW_CLS_STATS: return otx2_tc_get_flow_stats(nic, cls_flower); default: return -EOPNOTSUPP; } } static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic, struct tc_cls_matchall_offload *cls) { struct netlink_ext_ack *extack = cls->common.extack; struct flow_action *actions = &cls->rule->action; struct flow_action_entry *entry; u64 rate; int err; err = otx2_tc_validate_flow(nic, actions, extack); if (err) return err; if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) { NL_SET_ERR_MSG_MOD(extack, "Only one ingress MATCHALL ratelimitter can be offloaded"); return -ENOMEM; } entry = &cls->rule->action.entries[0]; switch (entry->id) { case FLOW_ACTION_POLICE: /* Ingress ratelimiting is not supported on OcteonTx2 */ if (is_dev_otx2(nic->pdev)) { NL_SET_ERR_MSG_MOD(extack, "Ingress policing not supported on this platform"); return -EOPNOTSUPP; } err = cn10k_alloc_matchall_ipolicer(nic); if (err) return err; /* Convert to bits per second */ rate = entry->police.rate_bytes_ps * 8; err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate); if (err) return err; nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED; break; default: NL_SET_ERR_MSG_MOD(extack, "Only police action supported with Ingress MATCHALL offload"); return -EOPNOTSUPP; } return 0; } static int 
otx2_tc_ingress_matchall_delete(struct otx2_nic *nic, struct tc_cls_matchall_offload *cls) { struct netlink_ext_ack *extack = cls->common.extack; int err; if (nic->flags & OTX2_FLAG_INTF_DOWN) { NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); return -EINVAL; } err = cn10k_free_matchall_ipolicer(nic); nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED; return err; } static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic, struct tc_cls_matchall_offload *cls_matchall) { switch (cls_matchall->command) { case TC_CLSMATCHALL_REPLACE: return otx2_tc_ingress_matchall_install(nic, cls_matchall); case TC_CLSMATCHALL_DESTROY: return otx2_tc_ingress_matchall_delete(nic, cls_matchall); case TC_CLSMATCHALL_STATS: default: break; } return -EOPNOTSUPP; } static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { struct otx2_nic *nic = cb_priv; bool ntuple; if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) return -EOPNOTSUPP; ntuple = nic->netdev->features & NETIF_F_NTUPLE; switch (type) { case TC_SETUP_CLSFLOWER: if (ntuple) { netdev_warn(nic->netdev, "Can't install TC flower offload rule when NTUPLE is active"); return -EOPNOTSUPP; } return otx2_setup_tc_cls_flower(nic, type_data); case TC_SETUP_CLSMATCHALL: return otx2_setup_tc_ingress_matchall(nic, type_data); default: break; } return -EOPNOTSUPP; } static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic, struct tc_cls_matchall_offload *cls_matchall) { switch (cls_matchall->command) { case TC_CLSMATCHALL_REPLACE: return otx2_tc_egress_matchall_install(nic, cls_matchall); case TC_CLSMATCHALL_DESTROY: return otx2_tc_egress_matchall_delete(nic, cls_matchall); case TC_CLSMATCHALL_STATS: default: break; } return -EOPNOTSUPP; } static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { struct otx2_nic *nic = cb_priv; if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) return -EOPNOTSUPP; switch (type) { case TC_SETUP_CLSMATCHALL: return otx2_setup_tc_egress_matchall(nic, type_data); default: break; } return -EOPNOTSUPP; } static LIST_HEAD(otx2_block_cb_list); static int otx2_setup_tc_block(struct net_device *netdev, struct flow_block_offload *f) { struct otx2_nic *nic = netdev_priv(netdev); flow_setup_cb_t *cb; bool ingress; if (f->block_shared) return -EOPNOTSUPP; if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { cb = otx2_setup_tc_block_ingress_cb; ingress = true; } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { cb = otx2_setup_tc_block_egress_cb; ingress = false; } else { return -EOPNOTSUPP; } return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb, nic, nic, ingress); } int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data) { switch (type) { case TC_SETUP_BLOCK: return otx2_setup_tc_block(netdev, type_data); case TC_SETUP_QDISC_HTB: return otx2_setup_tc_htb(netdev, type_data); default: return -EOPNOTSUPP; } } EXPORT_SYMBOL(otx2_setup_tc); int otx2_init_tc(struct otx2_nic *nic) { /* Exclude receive queue 0 being used for police action */ set_bit(0, &nic->rq_bmap); if (!nic->flow_cfg) { netdev_err(nic->netdev, "Can't init TC, nic->flow_cfg is not setup\n"); return -EINVAL; } return 0; } EXPORT_SYMBOL(otx2_init_tc); void otx2_shutdown_tc(struct otx2_nic *nic) { otx2_destroy_tc_flow_list(nic); } EXPORT_SYMBOL(otx2_shutdown_tc);
linux-master
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
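The rate-limiter programming at the top of this file splits a requested rate into an exponent and an 8-bit mantissa before it is packed into the TL4 PIR register. A minimal stand-alone sketch of that arithmetic is given below for reference; it mirrors only the statements visible above, and the MAX_RATE_* constants plus the ilog2() guard for rates below 2 are assumptions rather than values taken from the driver.

#include <stdint.h>
#include <stdio.h>

#define MAX_RATE_EXPONENT 0xf	/* assumed value, not taken from this file */
#define MAX_RATE_MANTISSA 0xff	/* assumed value, not taken from this file */

/* floor(log2(v)) for v > 0, standing in for the kernel's ilog2() */
static unsigned int ilog2_u64(uint64_t v)
{
	unsigned int l = 0;

	while (v >>= 1)
		l++;
	return l;
}

/* Same arithmetic as the fragment above: split a max rate into an exponent
 * and an 8-bit mantissa; rounddown_pow_of_two() becomes a shift here.
 */
static void rate_to_exp_mantissa(uint64_t maxrate, uint32_t *exp, uint32_t *mantissa)
{
	uint64_t tmp;

	if (!maxrate) {
		/* as in the driver: program the maximum instead of disabling */
		*exp = MAX_RATE_EXPONENT;
		*mantissa = MAX_RATE_MANTISSA;
		return;
	}

	*exp = ilog2_u64(maxrate) ? ilog2_u64(maxrate) - 1 : 0;
	tmp = maxrate - (1ULL << ilog2_u64(maxrate));
	if (maxrate < MAX_RATE_MANTISSA)
		*mantissa = tmp * 2;
	else
		*mantissa = tmp / (1ULL << (*exp - 7));
}

int main(void)
{
	uint32_t exp, mantissa;

	rate_to_exp_mantissa(300, &exp, &mantissa);
	/* 300: ilog2 = 8, exp = 7, tmp = 300 - 256 = 44, mantissa = 44 */
	printf("exp=%u mantissa=%u\n", exp, mantissa);

	rate_to_exp_mantissa(100, &exp, &mantissa);
	/* 100: ilog2 = 6, exp = 5, tmp = 36, below the mantissa max so mantissa = 72 */
	printf("exp=%u mantissa=%u\n", exp, mantissa);
	return 0;
}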
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Virtual Function ethernet driver * * Copyright (C) 2020 Marvell. * */ #include <linux/etherdevice.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/net_tstamp.h> #include "otx2_common.h" #include "otx2_reg.h" #include "otx2_ptp.h" #include "cn10k.h" #define DRV_NAME "rvu_nicvf" #define DRV_STRING "Marvell RVU NIC Virtual Function Driver" static const struct pci_device_id otx2_vf_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) }, { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) }, { } }; MODULE_AUTHOR("Sunil Goutham <[email protected]>"); MODULE_DESCRIPTION(DRV_STRING); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(pci, otx2_vf_id_table); /* RVU VF Interrupt Vector Enumeration */ enum { RVU_VF_INT_VEC_MBOX = 0x0, }; static void otx2vf_process_vfaf_mbox_msg(struct otx2_nic *vf, struct mbox_msghdr *msg) { if (msg->id >= MBOX_MSG_MAX) { dev_err(vf->dev, "Mbox msg with unknown ID %d\n", msg->id); return; } if (msg->sig != OTX2_MBOX_RSP_SIG) { dev_err(vf->dev, "Mbox msg with wrong signature %x, ID %d\n", msg->sig, msg->id); return; } if (msg->rc == MBOX_MSG_INVALID) { dev_err(vf->dev, "PF/AF says the sent msg(s) %d were invalid\n", msg->id); return; } switch (msg->id) { case MBOX_MSG_READY: vf->pcifunc = msg->pcifunc; break; case MBOX_MSG_MSIX_OFFSET: mbox_handler_msix_offset(vf, (struct msix_offset_rsp *)msg); break; case MBOX_MSG_NPA_LF_ALLOC: mbox_handler_npa_lf_alloc(vf, (struct npa_lf_alloc_rsp *)msg); break; case MBOX_MSG_NIX_LF_ALLOC: mbox_handler_nix_lf_alloc(vf, (struct nix_lf_alloc_rsp *)msg); break; case MBOX_MSG_NIX_BP_ENABLE: mbox_handler_nix_bp_enable(vf, (struct nix_bp_cfg_rsp *)msg); break; default: if (msg->rc) dev_err(vf->dev, "Mbox msg response has err %d, ID %d\n", msg->rc, msg->id); } } static void otx2vf_vfaf_mbox_handler(struct work_struct *work) { struct otx2_mbox_dev *mdev; struct mbox_hdr *rsp_hdr; struct mbox_msghdr *msg; struct otx2_mbox *mbox; struct mbox *af_mbox; int offset, id; af_mbox = container_of(work, struct mbox, mbox_wrk); mbox = &af_mbox->mbox; mdev = &mbox->dev[0]; rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); if (af_mbox->num_msgs == 0) return; offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); for (id = 0; id < af_mbox->num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + offset); otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg); offset = mbox->rx_start + msg->next_msgoff; if (mdev->msgs_acked == (af_mbox->num_msgs - 1)) __otx2_mbox_reset(mbox, 0); mdev->msgs_acked++; } } static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf, struct mbox_msghdr *req) { struct msg_rsp *rsp; int err; /* Check if valid, if not reply with a invalid msg */ if (req->sig != OTX2_MBOX_REQ_SIG) { otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id); return -ENODEV; } switch (req->id) { case MBOX_MSG_CGX_LINK_EVENT: rsp = (struct msg_rsp *)otx2_mbox_alloc_msg( &vf->mbox.mbox_up, 0, sizeof(struct msg_rsp)); if (!rsp) return -ENOMEM; rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT; rsp->hdr.sig = OTX2_MBOX_RSP_SIG; rsp->hdr.pcifunc = 0; rsp->hdr.rc = 0; err = otx2_mbox_up_handler_cgx_link_event( vf, (struct cgx_link_info_msg *)req, rsp); return err; default: otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id); return -ENODEV; } return 0; } static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work) { struct otx2_mbox_dev *mdev; struct mbox_hdr *rsp_hdr; struct mbox_msghdr *msg; struct otx2_mbox *mbox; struct mbox 
*vf_mbox; struct otx2_nic *vf; int offset, id; vf_mbox = container_of(work, struct mbox, mbox_up_wrk); vf = vf_mbox->pfvf; mbox = &vf_mbox->mbox_up; mdev = &mbox->dev[0]; rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); if (vf_mbox->up_num_msgs == 0) return; offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); for (id = 0; id < vf_mbox->up_num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + offset); otx2vf_process_mbox_msg_up(vf, msg); offset = mbox->rx_start + msg->next_msgoff; } otx2_mbox_msg_send(mbox, 0); } static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq) { struct otx2_nic *vf = (struct otx2_nic *)vf_irq; struct otx2_mbox_dev *mdev; struct otx2_mbox *mbox; struct mbox_hdr *hdr; /* Clear the IRQ */ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0)); /* Read latest mbox data */ smp_rmb(); /* Check for PF => VF response messages */ mbox = &vf->mbox.mbox; mdev = &mbox->dev[0]; otx2_sync_mbox_bbuf(mbox, 0); trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0)); hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); if (hdr->num_msgs) { vf->mbox.num_msgs = hdr->num_msgs; hdr->num_msgs = 0; memset(mbox->hwbase + mbox->rx_start, 0, ALIGN(sizeof(struct mbox_hdr), sizeof(u64))); queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk); } /* Check for PF => VF notification messages */ mbox = &vf->mbox.mbox_up; mdev = &mbox->dev[0]; otx2_sync_mbox_bbuf(mbox, 0); hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); if (hdr->num_msgs) { vf->mbox.up_num_msgs = hdr->num_msgs; hdr->num_msgs = 0; memset(mbox->hwbase + mbox->rx_start, 0, ALIGN(sizeof(struct mbox_hdr), sizeof(u64))); queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk); } return IRQ_HANDLED; } static void otx2vf_disable_mbox_intr(struct otx2_nic *vf) { int vector = pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX); /* Disable VF => PF mailbox IRQ */ otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0)); free_irq(vector, vf); } static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf) { struct otx2_hw *hw = &vf->hw; struct msg_req *req; char *irq_name; int err; /* Register mailbox interrupt handler */ irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE]; snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox"); err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX), otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf); if (err) { dev_err(vf->dev, "RVUPF: IRQ registration failed for VFAF mbox irq\n"); return err; } /* Enable mailbox interrupt for msgs coming from PF. * First clear to avoid spurious interrupts, if any. 
*/ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0)); otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0)); if (!probe_pf) return 0; /* Check mailbox communication with PF */ req = otx2_mbox_alloc_msg_ready(&vf->mbox); if (!req) { otx2vf_disable_mbox_intr(vf); return -ENOMEM; } err = otx2_sync_mbox_msg(&vf->mbox); if (err) { dev_warn(vf->dev, "AF not responding to mailbox, deferring probe\n"); otx2vf_disable_mbox_intr(vf); return -EPROBE_DEFER; } return 0; } static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf) { struct mbox *mbox = &vf->mbox; if (vf->mbox_wq) { destroy_workqueue(vf->mbox_wq); vf->mbox_wq = NULL; } if (mbox->mbox.hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag)) iounmap((void __iomem *)mbox->mbox.hwbase); otx2_mbox_destroy(&mbox->mbox); otx2_mbox_destroy(&mbox->mbox_up); } static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf) { struct mbox *mbox = &vf->mbox; void __iomem *hwbase; int err; mbox->pfvf = vf; vf->mbox_wq = alloc_ordered_workqueue("otx2_vfaf_mailbox", WQ_HIGHPRI | WQ_MEM_RECLAIM); if (!vf->mbox_wq) return -ENOMEM; if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) { /* For cn10k platform, VF mailbox region is in its BAR2 * register space */ hwbase = vf->reg_base + RVU_VF_MBOX_REGION; } else { /* Mailbox is a reserved memory (in RAM) region shared between * admin function (i.e PF0) and this VF, shouldn't be mapped as * device memory to allow unaligned accesses. */ hwbase = ioremap_wc(pci_resource_start(vf->pdev, PCI_MBOX_BAR_NUM), pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM)); if (!hwbase) { dev_err(vf->dev, "Unable to map VFAF mailbox region\n"); err = -ENOMEM; goto exit; } } err = otx2_mbox_init(&mbox->mbox, hwbase, vf->pdev, vf->reg_base, MBOX_DIR_VFPF, 1); if (err) goto exit; err = otx2_mbox_init(&mbox->mbox_up, hwbase, vf->pdev, vf->reg_base, MBOX_DIR_VFPF_UP, 1); if (err) goto exit; err = otx2_mbox_bbuf_init(mbox, vf->pdev); if (err) goto exit; INIT_WORK(&mbox->mbox_wrk, otx2vf_vfaf_mbox_handler); INIT_WORK(&mbox->mbox_up_wrk, otx2vf_vfaf_mbox_up_handler); mutex_init(&mbox->lock); return 0; exit: if (hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag)) iounmap(hwbase); destroy_workqueue(vf->mbox_wq); return err; } static int otx2vf_open(struct net_device *netdev) { struct otx2_nic *vf; int err; err = otx2_open(netdev); if (err) return err; /* LBKs do not receive link events so tell everyone we are up here */ vf = netdev_priv(netdev); if (is_otx2_lbkvf(vf->pdev)) { pr_info("%s NIC Link is UP\n", netdev->name); netif_carrier_on(netdev); netif_tx_start_all_queues(netdev); } return 0; } static int otx2vf_stop(struct net_device *netdev) { return otx2_stop(netdev); } static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev) { struct otx2_nic *vf = netdev_priv(netdev); int qidx = skb_get_queue_mapping(skb); struct otx2_snd_queue *sq; struct netdev_queue *txq; sq = &vf->qset.sq[qidx]; txq = netdev_get_tx_queue(netdev, qidx); if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) { netif_tx_stop_queue(txq); /* Check again, incase SQBs got freed up */ smp_mb(); if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb) > sq->sqe_thresh) netif_tx_wake_queue(txq); return NETDEV_TX_BUSY; } return NETDEV_TX_OK; } static void otx2vf_set_rx_mode(struct net_device *netdev) { struct otx2_nic *vf = netdev_priv(netdev); queue_work(vf->otx2_wq, &vf->rx_mode_work); } static void otx2vf_do_set_rx_mode(struct work_struct *work) { struct otx2_nic *vf = container_of(work, struct otx2_nic, rx_mode_work); struct net_device *netdev = vf->netdev; unsigned int flags = netdev->flags; struct 
nix_rx_mode *req; mutex_lock(&vf->mbox.lock); req = otx2_mbox_alloc_msg_nix_set_rx_mode(&vf->mbox); if (!req) { mutex_unlock(&vf->mbox.lock); return; } req->mode = NIX_RX_MODE_UCAST; if (flags & IFF_PROMISC) req->mode |= NIX_RX_MODE_PROMISC; if (flags & (IFF_ALLMULTI | IFF_MULTICAST)) req->mode |= NIX_RX_MODE_ALLMULTI; req->mode |= NIX_RX_MODE_USE_MCE; otx2_sync_mbox_msg(&vf->mbox); mutex_unlock(&vf->mbox.lock); } static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu) { bool if_up = netif_running(netdev); int err = 0; if (if_up) otx2vf_stop(netdev); netdev_info(netdev, "Changing MTU from %d to %d\n", netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (if_up) err = otx2vf_open(netdev); return err; } static void otx2vf_reset_task(struct work_struct *work) { struct otx2_nic *vf = container_of(work, struct otx2_nic, reset_task); rtnl_lock(); if (netif_running(vf->netdev)) { otx2vf_stop(vf->netdev); vf->reset_count++; otx2vf_open(vf->netdev); } rtnl_unlock(); } static int otx2vf_set_features(struct net_device *netdev, netdev_features_t features) { return otx2_handle_ntuple_tc_features(netdev, features); } static const struct net_device_ops otx2vf_netdev_ops = { .ndo_open = otx2vf_open, .ndo_stop = otx2vf_stop, .ndo_start_xmit = otx2vf_xmit, .ndo_select_queue = otx2_select_queue, .ndo_set_rx_mode = otx2vf_set_rx_mode, .ndo_set_mac_address = otx2_set_mac_address, .ndo_change_mtu = otx2vf_change_mtu, .ndo_set_features = otx2vf_set_features, .ndo_get_stats64 = otx2_get_stats64, .ndo_tx_timeout = otx2_tx_timeout, .ndo_eth_ioctl = otx2_ioctl, .ndo_setup_tc = otx2_setup_tc, }; static int otx2_wq_init(struct otx2_nic *vf) { vf->otx2_wq = create_singlethread_workqueue("otx2vf_wq"); if (!vf->otx2_wq) return -ENOMEM; INIT_WORK(&vf->rx_mode_work, otx2vf_do_set_rx_mode); INIT_WORK(&vf->reset_task, otx2vf_reset_task); return 0; } static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf) { struct otx2_hw *hw = &vf->hw; int num_vec, err; num_vec = hw->nix_msixoff; num_vec += NIX_LF_CINT_VEC_START + hw->max_queues; otx2vf_disable_mbox_intr(vf); pci_free_irq_vectors(hw->pdev); err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX); if (err < 0) { dev_err(vf->dev, "%s: Failed to realloc %d IRQ vectors\n", __func__, num_vec); return err; } return otx2vf_register_mbox_intr(vf, false); } static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int num_vec = pci_msix_vec_count(pdev); struct device *dev = &pdev->dev; int err, qcount, qos_txqs; struct net_device *netdev; struct otx2_nic *vf; struct otx2_hw *hw; err = pcim_enable_device(pdev); if (err) { dev_err(dev, "Failed to enable PCI device\n"); return err; } err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_err(dev, "PCI request regions failed 0x%x\n", err); return err; } err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (err) { dev_err(dev, "DMA mask config failed, abort\n"); goto err_release_regions; } pci_set_master(pdev); qcount = num_online_cpus(); qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES); netdev = alloc_etherdev_mqs(sizeof(*vf), qcount + qos_txqs, qcount); if (!netdev) { err = -ENOMEM; goto err_release_regions; } pci_set_drvdata(pdev, netdev); SET_NETDEV_DEV(netdev, &pdev->dev); vf = netdev_priv(netdev); vf->netdev = netdev; vf->pdev = pdev; vf->dev = dev; vf->iommu_domain = iommu_get_domain_for_dev(dev); vf->flags |= OTX2_FLAG_INTF_DOWN; hw = &vf->hw; hw->pdev = vf->pdev; hw->rx_queues = qcount; hw->tx_queues = qcount; hw->max_queues = qcount; hw->non_qos_queues = qcount; 
hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN; /* Use CQE of 128 byte descriptor size by default */ hw->xqe_size = 128; hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE, GFP_KERNEL); if (!hw->irq_name) { err = -ENOMEM; goto err_free_netdev; } hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec, sizeof(cpumask_var_t), GFP_KERNEL); if (!hw->affinity_mask) { err = -ENOMEM; goto err_free_netdev; } err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX); if (err < 0) { dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n", __func__, num_vec); goto err_free_netdev; } vf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); if (!vf->reg_base) { dev_err(dev, "Unable to map physical function CSRs, aborting\n"); err = -ENOMEM; goto err_free_irq_vectors; } otx2_setup_dev_hw_settings(vf); /* Init VF <=> PF mailbox stuff */ err = otx2vf_vfaf_mbox_init(vf); if (err) goto err_free_irq_vectors; /* Register mailbox interrupt */ err = otx2vf_register_mbox_intr(vf, true); if (err) goto err_mbox_destroy; /* Request AF to attach NPA and LIX LFs to this AF */ err = otx2_attach_npa_nix(vf); if (err) goto err_disable_mbox_intr; err = otx2vf_realloc_msix_vectors(vf); if (err) goto err_detach_rsrc; err = otx2_set_real_num_queues(netdev, qcount, qcount); if (err) goto err_detach_rsrc; err = cn10k_lmtst_init(vf); if (err) goto err_detach_rsrc; /* Don't check for error. Proceed without ptp */ otx2_ptp_init(vf); /* Assign default mac address */ otx2_get_mac_from_af(netdev); netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4; netdev->features = netdev->hw_features; /* Support TSO on tag interface */ netdev->vlan_features |= netdev->features; netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; netdev->features |= netdev->hw_features; netdev->hw_features |= NETIF_F_NTUPLE; netdev->hw_features |= NETIF_F_RXALL; netdev->hw_features |= NETIF_F_HW_TC; netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS); netdev->watchdog_timeo = OTX2_TX_TIMEOUT; netdev->netdev_ops = &otx2vf_netdev_ops; netdev->min_mtu = OTX2_MIN_MTU; netdev->max_mtu = otx2_get_max_mtu(vf); /* To distinguish, for LBK VFs set netdev name explicitly */ if (is_otx2_lbkvf(vf->pdev)) { int n; n = (vf->pcifunc >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK; /* Need to subtract 1 to get proper VF number */ n -= 1; snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n); } err = register_netdev(netdev); if (err) { dev_err(dev, "Failed to register netdevice\n"); goto err_ptp_destroy; } err = otx2_wq_init(vf); if (err) goto err_unreg_netdev; otx2vf_set_ethtool_ops(netdev); err = otx2vf_mcam_flow_init(vf); if (err) goto err_unreg_netdev; err = otx2_init_tc(vf); if (err) goto err_unreg_netdev; err = otx2_register_dl(vf); if (err) goto err_shutdown_tc; #ifdef CONFIG_DCB err = otx2_dcbnl_set_ops(netdev); if (err) goto err_shutdown_tc; #endif otx2_qos_init(vf, qos_txqs); return 0; err_shutdown_tc: otx2_shutdown_tc(vf); err_unreg_netdev: unregister_netdev(netdev); err_ptp_destroy: otx2_ptp_destroy(vf); err_detach_rsrc: free_percpu(vf->hw.lmt_info); if (test_bit(CN10K_LMTST, &vf->hw.cap_flag)) qmem_free(vf->dev, vf->dync_lmt); otx2_detach_resources(&vf->mbox); err_disable_mbox_intr: otx2vf_disable_mbox_intr(vf); err_mbox_destroy: otx2vf_vfaf_mbox_destroy(vf); err_free_irq_vectors: pci_free_irq_vectors(hw->pdev); err_free_netdev: pci_set_drvdata(pdev, NULL); free_netdev(netdev); err_release_regions: pci_release_regions(pdev); 
	return err;
}

static void otx2vf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *vf;

	if (!netdev)
		return;

	vf = netdev_priv(netdev);

	/* Disable 802.3x pause frames */
	if (vf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
	    (vf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
		vf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
		vf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
		otx2_config_pause_frm(vf);
	}

#ifdef CONFIG_DCB
	/* Disable PFC config */
	if (vf->pfc_en) {
		vf->pfc_en = 0;
		otx2_config_priority_flow_ctrl(vf);
	}
#endif

	cancel_work_sync(&vf->reset_task);
	otx2_unregister_dl(vf);
	unregister_netdev(netdev);
	if (vf->otx2_wq)
		destroy_workqueue(vf->otx2_wq);
	otx2_ptp_destroy(vf);
	otx2_mcam_flow_del(vf);
	otx2_shutdown_tc(vf);
	otx2_shutdown_qos(vf);
	otx2vf_disable_mbox_intr(vf);
	otx2_detach_resources(&vf->mbox);
	free_percpu(vf->hw.lmt_info);
	if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
		qmem_free(vf->dev, vf->dync_lmt);
	otx2vf_vfaf_mbox_destroy(vf);
	pci_free_irq_vectors(vf->pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	pci_release_regions(pdev);
}

static struct pci_driver otx2vf_driver = {
	.name = DRV_NAME,
	.id_table = otx2_vf_id_table,
	.probe = otx2vf_probe,
	.remove = otx2vf_remove,
	.shutdown = otx2vf_remove,
};

static int __init otx2vf_init_module(void)
{
	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	return pci_register_driver(&otx2vf_driver);
}

static void __exit otx2vf_cleanup_module(void)
{
	pci_unregister_driver(&otx2vf_driver);
}

module_init(otx2vf_init_module);
module_exit(otx2vf_cleanup_module);
linux-master
drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
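The transmit path in otx2vf_xmit() above stops the netdev queue when an SQE append fails and only re-wakes it once enough send-queue headroom is reported through the aura fill-count address. A minimal sketch of that wake check follows; the field names mirror the driver, but treating *sq->aura_fc_addr as the count of SQBs currently consumed is an assumption, and the sample numbers are invented.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the fields otx2vf_xmit() consults; values are illustrative only */
struct sq_snapshot {
	unsigned int num_sqbs;		/* SQBs owned by the send queue */
	unsigned int aura_fc;		/* *sq->aura_fc_addr (assumed: SQBs in use) */
	unsigned int sqe_per_sqb;	/* SQEs that fit in one SQB */
	unsigned int sqe_thresh;	/* headroom required before waking */
};

/* Same check as the driver: wake the netdev queue only once the free
 * SQE estimate exceeds the threshold.
 */
static bool txq_can_wake(const struct sq_snapshot *sq)
{
	return (sq->num_sqbs - sq->aura_fc) * sq->sqe_per_sqb > sq->sqe_thresh;
}

int main(void)
{
	struct sq_snapshot sq = {
		.num_sqbs = 128,
		.aura_fc = 120,
		.sqe_per_sqb = 32,
		.sqe_thresh = 128,
	};

	/* (128 - 120) * 32 = 256 > 128, so the queue would be woken */
	printf("wake txq: %s\n", txq_can_wake(&sq) ? "yes" : "no");
	return 0;
}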
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Ethernet driver * * Copyright (C) 2020 Marvell. * */ #include <linux/pci.h> #include <linux/ethtool.h> #include <linux/stddef.h> #include <linux/etherdevice.h> #include <linux/log2.h> #include <linux/net_tstamp.h> #include <linux/linkmode.h> #include "otx2_common.h" #include "otx2_ptp.h" #define DRV_NAME "rvu-nicpf" #define DRV_VF_NAME "rvu-nicvf" struct otx2_stat { char name[ETH_GSTRING_LEN]; unsigned int index; }; /* HW device stats */ #define OTX2_DEV_STAT(stat) { \ .name = #stat, \ .index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \ } enum link_mode { OTX2_MODE_SUPPORTED, OTX2_MODE_ADVERTISED }; static const struct otx2_stat otx2_dev_stats[] = { OTX2_DEV_STAT(rx_ucast_frames), OTX2_DEV_STAT(rx_bcast_frames), OTX2_DEV_STAT(rx_mcast_frames), OTX2_DEV_STAT(tx_ucast_frames), OTX2_DEV_STAT(tx_bcast_frames), OTX2_DEV_STAT(tx_mcast_frames), }; /* Driver level stats */ #define OTX2_DRV_STAT(stat) { \ .name = #stat, \ .index = offsetof(struct otx2_drv_stats, stat) / sizeof(atomic_t), \ } static const struct otx2_stat otx2_drv_stats[] = { OTX2_DRV_STAT(rx_fcs_errs), OTX2_DRV_STAT(rx_oversize_errs), OTX2_DRV_STAT(rx_undersize_errs), OTX2_DRV_STAT(rx_csum_errs), OTX2_DRV_STAT(rx_len_errs), OTX2_DRV_STAT(rx_other_errs), }; static const struct otx2_stat otx2_queue_stats[] = { { "bytes", 0 }, { "frames", 1 }, }; static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats); static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats); static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats); static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf); static void otx2_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { struct otx2_nic *pfvf = netdev_priv(netdev); strscpy(info->driver, DRV_NAME, sizeof(info->driver)); strscpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info)); } static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset) { int start_qidx = qset * pfvf->hw.rx_queues; int qidx, stats; for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) { for (stats = 0; stats < otx2_n_queue_stats; stats++) { sprintf(*data, "rxq%d: %s", qidx + start_qidx, otx2_queue_stats[stats].name); *data += ETH_GSTRING_LEN; } } for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) { for (stats = 0; stats < otx2_n_queue_stats; stats++) { if (qidx >= pfvf->hw.non_qos_queues) sprintf(*data, "txq_qos%d: %s", qidx + start_qidx - pfvf->hw.non_qos_queues, otx2_queue_stats[stats].name); else sprintf(*data, "txq%d: %s", qidx + start_qidx, otx2_queue_stats[stats].name); *data += ETH_GSTRING_LEN; } } } static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data) { struct otx2_nic *pfvf = netdev_priv(netdev); int stats; if (sset != ETH_SS_STATS) return; for (stats = 0; stats < otx2_n_dev_stats; stats++) { memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } for (stats = 0; stats < otx2_n_drv_stats; stats++) { memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } otx2_get_qset_strings(pfvf, &data, 0); if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) { for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) { sprintf(data, "cgx_rxstat%d: ", stats); data += ETH_GSTRING_LEN; } for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) { sprintf(data, "cgx_txstat%d: ", stats); data += ETH_GSTRING_LEN; } } strcpy(data, "reset_count"); data += ETH_GSTRING_LEN; sprintf(data, "Fec Corrected Errors: "); data += 
ETH_GSTRING_LEN; sprintf(data, "Fec Uncorrected Errors: "); data += ETH_GSTRING_LEN; } static void otx2_get_qset_stats(struct otx2_nic *pfvf, struct ethtool_stats *stats, u64 **data) { int stat, qidx; if (!pfvf) return; for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) { if (!otx2_update_rq_stats(pfvf, qidx)) { for (stat = 0; stat < otx2_n_queue_stats; stat++) *((*data)++) = 0; continue; } for (stat = 0; stat < otx2_n_queue_stats; stat++) *((*data)++) = ((u64 *)&pfvf->qset.rq[qidx].stats) [otx2_queue_stats[stat].index]; } for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) { if (!otx2_update_sq_stats(pfvf, qidx)) { for (stat = 0; stat < otx2_n_queue_stats; stat++) *((*data)++) = 0; continue; } for (stat = 0; stat < otx2_n_queue_stats; stat++) *((*data)++) = ((u64 *)&pfvf->qset.sq[qidx].stats) [otx2_queue_stats[stat].index]; } } static int otx2_get_phy_fec_stats(struct otx2_nic *pfvf) { struct msg_req *req; int rc = -ENOMEM; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_cgx_get_phy_fec_stats(&pfvf->mbox); if (!req) goto end; if (!otx2_sync_mbox_msg(&pfvf->mbox)) rc = 0; end: mutex_unlock(&pfvf->mbox.lock); return rc; } /* Get device and per queue statistics */ static void otx2_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct otx2_nic *pfvf = netdev_priv(netdev); u64 fec_corr_blks, fec_uncorr_blks; struct cgx_fw_data *rsp; int stat; otx2_get_dev_stats(pfvf); for (stat = 0; stat < otx2_n_dev_stats; stat++) *(data++) = ((u64 *)&pfvf->hw.dev_stats) [otx2_dev_stats[stat].index]; for (stat = 0; stat < otx2_n_drv_stats; stat++) *(data++) = atomic_read(&((atomic_t *)&pfvf->hw.drv_stats) [otx2_drv_stats[stat].index]); otx2_get_qset_stats(pfvf, stats, &data); if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) { otx2_update_lmac_stats(pfvf); for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++) *(data++) = pfvf->hw.cgx_rx_stats[stat]; for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++) *(data++) = pfvf->hw.cgx_tx_stats[stat]; } *(data++) = pfvf->reset_count; fec_corr_blks = pfvf->hw.cgx_fec_corr_blks; fec_uncorr_blks = pfvf->hw.cgx_fec_uncorr_blks; rsp = otx2_get_fwdata(pfvf); if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats && !otx2_get_phy_fec_stats(pfvf)) { /* Fetch fwdata again because it's been recently populated with * latest PHY FEC stats. 
*/ rsp = otx2_get_fwdata(pfvf); if (!IS_ERR(rsp)) { struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats; if (pfvf->linfo.fec == OTX2_FEC_BASER) { fec_corr_blks = p->brfec_corr_blks; fec_uncorr_blks = p->brfec_uncorr_blks; } else { fec_corr_blks = p->rsfec_corr_cws; fec_uncorr_blks = p->rsfec_uncorr_cws; } } } *(data++) = fec_corr_blks; *(data++) = fec_uncorr_blks; } static int otx2_get_sset_count(struct net_device *netdev, int sset) { struct otx2_nic *pfvf = netdev_priv(netdev); int qstats_count, mac_stats = 0; if (sset != ETH_SS_STATS) return -EINVAL; qstats_count = otx2_n_queue_stats * (pfvf->hw.rx_queues + otx2_get_total_tx_queues(pfvf)); if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) mac_stats = CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT; otx2_update_lmac_fec_stats(pfvf); return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + mac_stats + OTX2_FEC_STATS_CNT + 1; } /* Get no of queues device supports and current queue count */ static void otx2_get_channels(struct net_device *dev, struct ethtool_channels *channel) { struct otx2_nic *pfvf = netdev_priv(dev); channel->max_rx = pfvf->hw.max_queues; channel->max_tx = pfvf->hw.max_queues; channel->rx_count = pfvf->hw.rx_queues; channel->tx_count = pfvf->hw.tx_queues; } /* Set no of Tx, Rx queues to be used */ static int otx2_set_channels(struct net_device *dev, struct ethtool_channels *channel) { struct otx2_nic *pfvf = netdev_priv(dev); bool if_up = netif_running(dev); int err, qos_txqs; if (!channel->rx_count || !channel->tx_count) return -EINVAL; if (bitmap_weight(&pfvf->rq_bmap, pfvf->hw.rx_queues) > 1) { netdev_err(dev, "Receive queues are in use by TC police action\n"); return -EINVAL; } if (if_up) dev->netdev_ops->ndo_stop(dev); qos_txqs = bitmap_weight(pfvf->qos.qos_sq_bmap, OTX2_QOS_MAX_LEAF_NODES); err = otx2_set_real_num_queues(dev, channel->tx_count + qos_txqs, channel->rx_count); if (err) return err; pfvf->hw.rx_queues = channel->rx_count; pfvf->hw.tx_queues = channel->tx_count; if (pfvf->xdp_prog) pfvf->hw.xdp_queues = channel->rx_count; pfvf->hw.non_qos_queues = pfvf->hw.tx_queues + pfvf->hw.xdp_queues; if (if_up) err = dev->netdev_ops->ndo_open(dev); netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n", pfvf->hw.tx_queues, pfvf->hw.rx_queues); return err; } static void otx2_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct otx2_nic *pfvf = netdev_priv(netdev); struct cgx_pause_frm_cfg *req, *rsp; if (is_otx2_lbkvf(pfvf->pdev)) return; req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox); if (!req) return; if (!otx2_sync_mbox_msg(&pfvf->mbox)) { rsp = (struct cgx_pause_frm_cfg *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); pause->rx_pause = rsp->rx_pause; pause->tx_pause = rsp->tx_pause; } } static int otx2_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct otx2_nic *pfvf = netdev_priv(netdev); if (pause->autoneg) return -EOPNOTSUPP; if (is_otx2_lbkvf(pfvf->pdev)) return -EOPNOTSUPP; if (pause->rx_pause) pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED; else pfvf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED; if (pause->tx_pause) pfvf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED; else pfvf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED; return otx2_config_pause_frm(pfvf); } static void otx2_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct otx2_nic *pfvf = netdev_priv(netdev); struct otx2_qset *qs = &pfvf->qset; ring->rx_max_pending = Q_COUNT(Q_SIZE_MAX); 
ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256); ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX); ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K); kernel_ring->rx_buf_len = pfvf->hw.rbuf_len; kernel_ring->cqe_size = pfvf->hw.xqe_size; } static int otx2_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct otx2_nic *pfvf = netdev_priv(netdev); u32 rx_buf_len = kernel_ring->rx_buf_len; u32 old_rx_buf_len = pfvf->hw.rbuf_len; u32 xqe_size = kernel_ring->cqe_size; bool if_up = netif_running(netdev); struct otx2_qset *qs = &pfvf->qset; u32 rx_count, tx_count; if (ring->rx_mini_pending || ring->rx_jumbo_pending) return -EINVAL; /* Hardware supports max size of 32k for a receive buffer * and 1536 is typical ethernet frame size. */ if (rx_buf_len && (rx_buf_len < 1536 || rx_buf_len > 32768)) { netdev_err(netdev, "Receive buffer range is 1536 - 32768"); return -EINVAL; } if (xqe_size != 128 && xqe_size != 512) { netdev_err(netdev, "Completion event size must be 128 or 512"); return -EINVAL; } /* Permitted lengths are 16 64 256 1K 4K 16K 64K 256K 1M */ rx_count = ring->rx_pending; /* On some silicon variants a skid or reserved CQEs are * needed to avoid CQ overflow. */ if (rx_count < pfvf->hw.rq_skid) rx_count = pfvf->hw.rq_skid; rx_count = Q_COUNT(Q_SIZE(rx_count, 3)); /* Due pipelining impact minimum 2000 unused SQ CQE's * need to be maintained to avoid CQ overflow, hence the * minimum 4K size. */ tx_count = clamp_t(u32, ring->tx_pending, Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX)); tx_count = Q_COUNT(Q_SIZE(tx_count, 3)); if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt && rx_buf_len == old_rx_buf_len && xqe_size == pfvf->hw.xqe_size) return 0; if (if_up) netdev->netdev_ops->ndo_stop(netdev); /* Assigned to the nearest possible exponent. 
*/ qs->sqe_cnt = tx_count; qs->rqe_cnt = rx_count; pfvf->hw.rbuf_len = rx_buf_len; pfvf->hw.xqe_size = xqe_size; if (if_up) return netdev->netdev_ops->ndo_open(netdev); return 0; } static int otx2_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *cmd, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct otx2_nic *pfvf = netdev_priv(netdev); struct otx2_hw *hw = &pfvf->hw; cmd->rx_coalesce_usecs = hw->cq_time_wait; cmd->rx_max_coalesced_frames = hw->cq_ecount_wait; cmd->tx_coalesce_usecs = hw->cq_time_wait; cmd->tx_max_coalesced_frames = hw->cq_ecount_wait; if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) == OTX2_FLAG_ADPTV_INT_COAL_ENABLED) { cmd->use_adaptive_rx_coalesce = 1; cmd->use_adaptive_tx_coalesce = 1; } else { cmd->use_adaptive_rx_coalesce = 0; cmd->use_adaptive_tx_coalesce = 0; } return 0; } static int otx2_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct otx2_nic *pfvf = netdev_priv(netdev); struct otx2_hw *hw = &pfvf->hw; u8 priv_coalesce_status; int qidx; if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames) return 0; if (ec->use_adaptive_rx_coalesce != ec->use_adaptive_tx_coalesce) { netdev_err(netdev, "adaptive-rx should be same as adaptive-tx"); return -EINVAL; } /* Check and update coalesce status */ if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) == OTX2_FLAG_ADPTV_INT_COAL_ENABLED) { priv_coalesce_status = 1; if (!ec->use_adaptive_rx_coalesce) pfvf->flags &= ~OTX2_FLAG_ADPTV_INT_COAL_ENABLED; } else { priv_coalesce_status = 0; if (ec->use_adaptive_rx_coalesce) pfvf->flags |= OTX2_FLAG_ADPTV_INT_COAL_ENABLED; } /* 'cq_time_wait' is 8bit and is in multiple of 100ns, * so clamp the user given value to the range of 1 to 25usec. */ ec->rx_coalesce_usecs = clamp_t(u32, ec->rx_coalesce_usecs, 1, CQ_TIMER_THRESH_MAX); ec->tx_coalesce_usecs = clamp_t(u32, ec->tx_coalesce_usecs, 1, CQ_TIMER_THRESH_MAX); /* Rx and Tx are mapped to same CQ, check which one * is changed, if both then choose the min. */ if (hw->cq_time_wait == ec->rx_coalesce_usecs) hw->cq_time_wait = ec->tx_coalesce_usecs; else if (hw->cq_time_wait == ec->tx_coalesce_usecs) hw->cq_time_wait = ec->rx_coalesce_usecs; else hw->cq_time_wait = min_t(u8, ec->rx_coalesce_usecs, ec->tx_coalesce_usecs); /* Max ecount_wait supported is 16bit, * so clamp the user given value to the range of 1 to 64k. */ ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames, 1, NAPI_POLL_WEIGHT); ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames, 1, NAPI_POLL_WEIGHT); /* Rx and Tx are mapped to same CQ, check which one * is changed, if both then choose the min. */ if (hw->cq_ecount_wait == ec->rx_max_coalesced_frames) hw->cq_ecount_wait = ec->tx_max_coalesced_frames; else if (hw->cq_ecount_wait == ec->tx_max_coalesced_frames) hw->cq_ecount_wait = ec->rx_max_coalesced_frames; else hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames, ec->tx_max_coalesced_frames); /* Reset 'cq_time_wait' and 'cq_ecount_wait' to * default values if coalesce status changed from * 'on' to 'off'. 
*/ if (priv_coalesce_status && ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) != OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) { hw->cq_time_wait = CQ_TIMER_THRESH_DEFAULT; hw->cq_ecount_wait = CQ_CQE_THRESH_DEFAULT; } if (netif_running(netdev)) { for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++) otx2_config_irq_coalescing(pfvf, qidx); } return 0; } static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) { struct otx2_rss_info *rss = &pfvf->hw.rss_info; if (!(rss->flowkey_cfg & (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))) return 0; /* Mimimum is IPv4 and IPv6, SIP/DIP */ nfc->data = RXH_IP_SRC | RXH_IP_DST; if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_VLAN) nfc->data |= RXH_VLAN; switch (nfc->flow_type) { case TCP_V4_FLOW: case TCP_V6_FLOW: if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_TCP) nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case UDP_V4_FLOW: case UDP_V6_FLOW: if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_UDP) nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case SCTP_V4_FLOW: case SCTP_V6_FLOW: if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_SCTP) nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case AH_ESP_V4_FLOW: case AH_ESP_V6_FLOW: if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_ESP) nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case AH_V4_FLOW: case ESP_V4_FLOW: case IPV4_FLOW: break; case AH_V6_FLOW: case ESP_V6_FLOW: case IPV6_FLOW: break; default: return -EINVAL; } return 0; } static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) { struct otx2_rss_info *rss = &pfvf->hw.rss_info; u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3; u32 rss_cfg = rss->flowkey_cfg; if (!rss->enable) { netdev_err(pfvf->netdev, "RSS is disabled, cannot change settings\n"); return -EIO; } /* Mimimum is IPv4 and IPv6, SIP/DIP */ if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST)) return -EINVAL; if (nfc->data & RXH_VLAN) rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN; else rss_cfg &= ~NIX_FLOW_KEY_TYPE_VLAN; switch (nfc->flow_type) { case TCP_V4_FLOW: case TCP_V6_FLOW: /* Different config for v4 and v6 is not supported. * Both of them have to be either 4-tuple or 2-tuple. */ switch (nfc->data & rxh_l4) { case 0: rss_cfg &= ~NIX_FLOW_KEY_TYPE_TCP; break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): rss_cfg |= NIX_FLOW_KEY_TYPE_TCP; break; default: return -EINVAL; } break; case UDP_V4_FLOW: case UDP_V6_FLOW: switch (nfc->data & rxh_l4) { case 0: rss_cfg &= ~NIX_FLOW_KEY_TYPE_UDP; break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): rss_cfg |= NIX_FLOW_KEY_TYPE_UDP; break; default: return -EINVAL; } break; case SCTP_V4_FLOW: case SCTP_V6_FLOW: switch (nfc->data & rxh_l4) { case 0: rss_cfg &= ~NIX_FLOW_KEY_TYPE_SCTP; break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): rss_cfg |= NIX_FLOW_KEY_TYPE_SCTP; break; default: return -EINVAL; } break; case AH_ESP_V4_FLOW: case AH_ESP_V6_FLOW: switch (nfc->data & rxh_l4) { case 0: rss_cfg &= ~(NIX_FLOW_KEY_TYPE_ESP | NIX_FLOW_KEY_TYPE_AH); rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN | NIX_FLOW_KEY_TYPE_IPV4_PROTO; break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): /* If VLAN hashing is also requested for ESP then do not * allow because of hardware 40 bytes flow key limit. */ if (rss_cfg & NIX_FLOW_KEY_TYPE_VLAN) { netdev_err(pfvf->netdev, "RSS hash of ESP or AH with VLAN is not supported\n"); return -EOPNOTSUPP; } rss_cfg |= NIX_FLOW_KEY_TYPE_ESP | NIX_FLOW_KEY_TYPE_AH; /* Disable IPv4 proto hashing since IPv6 SA+DA(32 bytes) * and ESP SPI+sequence(8 bytes) uses hardware maximum * limit of 40 byte flow key. 
*/ rss_cfg &= ~NIX_FLOW_KEY_TYPE_IPV4_PROTO; break; default: return -EINVAL; } break; case IPV4_FLOW: case IPV6_FLOW: rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6; break; default: return -EINVAL; } rss->flowkey_cfg = rss_cfg; otx2_set_flowkey_cfg(pfvf); return 0; } static int otx2_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc, u32 *rules) { bool ntuple = !!(dev->features & NETIF_F_NTUPLE); struct otx2_nic *pfvf = netdev_priv(dev); int ret = -EOPNOTSUPP; switch (nfc->cmd) { case ETHTOOL_GRXRINGS: nfc->data = pfvf->hw.rx_queues; ret = 0; break; case ETHTOOL_GRXCLSRLCNT: if (netif_running(dev) && ntuple) { nfc->rule_cnt = pfvf->flow_cfg->nr_flows; ret = 0; } break; case ETHTOOL_GRXCLSRULE: if (netif_running(dev) && ntuple) ret = otx2_get_flow(pfvf, nfc, nfc->fs.location); break; case ETHTOOL_GRXCLSRLALL: if (netif_running(dev) && ntuple) ret = otx2_get_all_flows(pfvf, nfc, rules); break; case ETHTOOL_GRXFH: return otx2_get_rss_hash_opts(pfvf, nfc); default: break; } return ret; } static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc) { bool ntuple = !!(dev->features & NETIF_F_NTUPLE); struct otx2_nic *pfvf = netdev_priv(dev); int ret = -EOPNOTSUPP; pfvf->flow_cfg->ntuple = ntuple; switch (nfc->cmd) { case ETHTOOL_SRXFH: ret = otx2_set_rss_hash_opts(pfvf, nfc); break; case ETHTOOL_SRXCLSRLINS: if (netif_running(dev) && ntuple) ret = otx2_add_flow(pfvf, nfc); break; case ETHTOOL_SRXCLSRLDEL: if (netif_running(dev) && ntuple) ret = otx2_remove_flow(pfvf, nfc->fs.location); break; default: break; } return ret; } static u32 otx2_get_rxfh_key_size(struct net_device *netdev) { struct otx2_nic *pfvf = netdev_priv(netdev); struct otx2_rss_info *rss; rss = &pfvf->hw.rss_info; return sizeof(rss->key); } static u32 otx2_get_rxfh_indir_size(struct net_device *dev) { return MAX_RSS_INDIR_TBL_SIZE; } static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id) { struct otx2_rss_info *rss = &pfvf->hw.rss_info; otx2_rss_ctx_flow_del(pfvf, ctx_id); kfree(rss->rss_ctx[ctx_id]); rss->rss_ctx[ctx_id] = NULL; return 0; } static int otx2_rss_ctx_create(struct otx2_nic *pfvf, u32 *rss_context) { struct otx2_rss_info *rss = &pfvf->hw.rss_info; u8 ctx; for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) { if (!rss->rss_ctx[ctx]) break; } if (ctx == MAX_RSS_GROUPS) return -EINVAL; rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL); if (!rss->rss_ctx[ctx]) return -ENOMEM; *rss_context = ctx; return 0; } /* RSS context configuration */ static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir, const u8 *hkey, const u8 hfunc, u32 *rss_context, bool delete) { struct otx2_nic *pfvf = netdev_priv(dev); struct otx2_rss_ctx *rss_ctx; struct otx2_rss_info *rss; int ret, idx; if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; if (*rss_context != ETH_RXFH_CONTEXT_ALLOC && *rss_context >= MAX_RSS_GROUPS) return -EINVAL; rss = &pfvf->hw.rss_info; if (!rss->enable) { netdev_err(dev, "RSS is disabled, cannot change settings\n"); return -EIO; } if (hkey) { memcpy(rss->key, hkey, sizeof(rss->key)); otx2_set_rss_key(pfvf); } if (delete) return otx2_rss_ctx_delete(pfvf, *rss_context); if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { ret = otx2_rss_ctx_create(pfvf, rss_context); if (ret) return ret; } if (indir) { rss_ctx = rss->rss_ctx[*rss_context]; for (idx = 0; idx < rss->rss_size; idx++) rss_ctx->ind_tbl[idx] = indir[idx]; } otx2_set_rss_table(pfvf, *rss_context); return 0; } static int otx2_get_rxfh_context(struct net_device 
*dev, u32 *indir, u8 *hkey, u8 *hfunc, u32 rss_context) { struct otx2_nic *pfvf = netdev_priv(dev); struct otx2_rss_ctx *rss_ctx; struct otx2_rss_info *rss; int idx, rx_queues; rss = &pfvf->hw.rss_info; if (hfunc) *hfunc = ETH_RSS_HASH_TOP; if (!indir) return 0; if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) { rx_queues = pfvf->hw.rx_queues; for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++) indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues); return 0; } if (rss_context >= MAX_RSS_GROUPS) return -ENOENT; rss_ctx = rss->rss_ctx[rss_context]; if (!rss_ctx) return -ENOENT; if (indir) { for (idx = 0; idx < rss->rss_size; idx++) indir[idx] = rss_ctx->ind_tbl[idx]; } if (hkey) memcpy(hkey, rss->key, sizeof(rss->key)); return 0; } /* Get RSS configuration */ static int otx2_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey, u8 *hfunc) { return otx2_get_rxfh_context(dev, indir, hkey, hfunc, DEFAULT_RSS_CONTEXT_GROUP); } /* Configure RSS table and hash key */ static int otx2_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *hkey, const u8 hfunc) { u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP; return otx2_set_rxfh_context(dev, indir, hkey, hfunc, &rss_context, 0); } static u32 otx2_get_msglevel(struct net_device *netdev) { struct otx2_nic *pfvf = netdev_priv(netdev); return pfvf->msg_enable; } static void otx2_set_msglevel(struct net_device *netdev, u32 val) { struct otx2_nic *pfvf = netdev_priv(netdev); pfvf->msg_enable = val; } static u32 otx2_get_link(struct net_device *netdev) { struct otx2_nic *pfvf = netdev_priv(netdev); /* LBK link is internal and always UP */ if (is_otx2_lbkvf(pfvf->pdev)) return 1; return pfvf->linfo.link_up; } static int otx2_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) { struct otx2_nic *pfvf = netdev_priv(netdev); if (!pfvf->ptp) return ethtool_op_get_ts_info(netdev, info); info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; info->phc_index = otx2_ptp_clock_index(pfvf); info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); if (test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag)) info->tx_types |= BIT(HWTSTAMP_TX_ONESTEP_SYNC); info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); return 0; } static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf) { struct cgx_fw_data *rsp = NULL; struct msg_req *req; int err = 0; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_cgx_get_aux_link_info(&pfvf->mbox); if (!req) { mutex_unlock(&pfvf->mbox.lock); return ERR_PTR(-ENOMEM); } err = otx2_sync_mbox_msg(&pfvf->mbox); if (!err) { rsp = (struct cgx_fw_data *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); } else { rsp = ERR_PTR(err); } mutex_unlock(&pfvf->mbox.lock); return rsp; } static int otx2_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) { struct otx2_nic *pfvf = netdev_priv(netdev); struct cgx_fw_data *rsp; const int fec[] = { ETHTOOL_FEC_OFF, ETHTOOL_FEC_BASER, ETHTOOL_FEC_RS, ETHTOOL_FEC_BASER | ETHTOOL_FEC_RS}; #define FEC_MAX_INDEX 4 if (pfvf->linfo.fec < FEC_MAX_INDEX) fecparam->active_fec = fec[pfvf->linfo.fec]; rsp = otx2_get_fwdata(pfvf); if (IS_ERR(rsp)) return PTR_ERR(rsp); if (rsp->fwdata.supported_fec < FEC_MAX_INDEX) { if (!rsp->fwdata.supported_fec) fecparam->fec = ETHTOOL_FEC_NONE; else fecparam->fec = fec[rsp->fwdata.supported_fec]; } return 0; } static int otx2_set_fecparam(struct 
net_device *netdev, struct ethtool_fecparam *fecparam) { struct otx2_nic *pfvf = netdev_priv(netdev); struct mbox *mbox = &pfvf->mbox; struct fec_mode *req, *rsp; int err = 0, fec = 0; switch (fecparam->fec) { /* Firmware does not support AUTO mode consider it as FEC_OFF */ case ETHTOOL_FEC_OFF: case ETHTOOL_FEC_AUTO: fec = OTX2_FEC_OFF; break; case ETHTOOL_FEC_RS: fec = OTX2_FEC_RS; break; case ETHTOOL_FEC_BASER: fec = OTX2_FEC_BASER; break; default: netdev_warn(pfvf->netdev, "Unsupported FEC mode: %d", fecparam->fec); return -EINVAL; } if (fec == pfvf->linfo.fec) return 0; mutex_lock(&mbox->lock); req = otx2_mbox_alloc_msg_cgx_set_fec_param(&pfvf->mbox); if (!req) { err = -ENOMEM; goto end; } req->fec = fec; err = otx2_sync_mbox_msg(&pfvf->mbox); if (err) goto end; rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); if (rsp->fec >= 0) pfvf->linfo.fec = rsp->fec; else err = rsp->fec; end: mutex_unlock(&mbox->lock); return err; } static void otx2_get_fec_info(u64 index, int req_mode, struct ethtool_link_ksettings *link_ksettings) { __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_fec_modes) = { 0, }; switch (index) { case OTX2_FEC_NONE: linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, otx2_fec_modes); break; case OTX2_FEC_BASER: linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, otx2_fec_modes); break; case OTX2_FEC_RS: linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, otx2_fec_modes); break; case OTX2_FEC_BASER | OTX2_FEC_RS: linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, otx2_fec_modes); linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, otx2_fec_modes); break; } /* Add fec modes to existing modes */ if (req_mode == OTX2_MODE_ADVERTISED) linkmode_or(link_ksettings->link_modes.advertising, link_ksettings->link_modes.advertising, otx2_fec_modes); else linkmode_or(link_ksettings->link_modes.supported, link_ksettings->link_modes.supported, otx2_fec_modes); } static void otx2_get_link_mode_info(u64 link_mode_bmap, bool req_mode, struct ethtool_link_ksettings *link_ksettings) { __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_link_modes) = { 0, }; const int otx2_sgmii_features[6] = { ETHTOOL_LINK_MODE_10baseT_Half_BIT, ETHTOOL_LINK_MODE_10baseT_Full_BIT, ETHTOOL_LINK_MODE_100baseT_Half_BIT, ETHTOOL_LINK_MODE_100baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT, }; /* CGX link modes to Ethtool link mode mapping */ const int cgx_link_mode[27] = { 0, /* SGMII Mode */ ETHTOOL_LINK_MODE_1000baseX_Full_BIT, ETHTOOL_LINK_MODE_10000baseT_Full_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 0, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 0, 0, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 0, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, 0, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, 0, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT }; u8 bit; for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, 27) { /* SGMII mode is set */ if (bit == 0) linkmode_set_bit_array(otx2_sgmii_features, ARRAY_SIZE(otx2_sgmii_features), otx2_link_modes); else linkmode_set_bit(cgx_link_mode[bit], otx2_link_modes); } if 
(req_mode == OTX2_MODE_ADVERTISED) linkmode_copy(link_ksettings->link_modes.advertising, otx2_link_modes); else linkmode_copy(link_ksettings->link_modes.supported, otx2_link_modes); } static int otx2_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct otx2_nic *pfvf = netdev_priv(netdev); struct cgx_fw_data *rsp = NULL; cmd->base.duplex = pfvf->linfo.full_duplex; cmd->base.speed = pfvf->linfo.speed; cmd->base.autoneg = pfvf->linfo.an; rsp = otx2_get_fwdata(pfvf); if (IS_ERR(rsp)) return PTR_ERR(rsp); if (rsp->fwdata.supported_an) ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); otx2_get_link_mode_info(rsp->fwdata.advertised_link_modes, OTX2_MODE_ADVERTISED, cmd); otx2_get_fec_info(rsp->fwdata.advertised_fec, OTX2_MODE_ADVERTISED, cmd); otx2_get_link_mode_info(rsp->fwdata.supported_link_modes, OTX2_MODE_SUPPORTED, cmd); otx2_get_fec_info(rsp->fwdata.supported_fec, OTX2_MODE_SUPPORTED, cmd); return 0; } static void otx2_get_advertised_mode(const struct ethtool_link_ksettings *cmd, u64 *mode) { u32 bit_pos; /* Firmware does not support requesting multiple advertised modes * return first set bit */ bit_pos = find_first_bit(cmd->link_modes.advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); if (bit_pos != __ETHTOOL_LINK_MODE_MASK_NBITS) *mode = bit_pos; } static int otx2_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { struct otx2_nic *pf = netdev_priv(netdev); struct ethtool_link_ksettings cur_ks; struct cgx_set_link_mode_req *req; struct mbox *mbox = &pf->mbox; int err = 0; memset(&cur_ks, 0, sizeof(struct ethtool_link_ksettings)); if (!ethtool_validate_speed(cmd->base.speed) || !ethtool_validate_duplex(cmd->base.duplex)) return -EINVAL; if (cmd->base.autoneg != AUTONEG_ENABLE && cmd->base.autoneg != AUTONEG_DISABLE) return -EINVAL; otx2_get_link_ksettings(netdev, &cur_ks); /* Check requested modes against supported modes by hardware */ if (!linkmode_subset(cmd->link_modes.advertising, cur_ks.link_modes.supported)) return -EINVAL; mutex_lock(&mbox->lock); req = otx2_mbox_alloc_msg_cgx_set_link_mode(&pf->mbox); if (!req) { err = -ENOMEM; goto end; } req->args.speed = cmd->base.speed; /* firmware expects 1 for half duplex and 0 for full duplex * hence inverting */ req->args.duplex = cmd->base.duplex ^ 0x1; req->args.an = cmd->base.autoneg; otx2_get_advertised_mode(cmd, &req->args.mode); err = otx2_sync_mbox_msg(&pf->mbox); end: mutex_unlock(&mbox->lock); return err; } static void otx2_get_fec_stats(struct net_device *netdev, struct ethtool_fec_stats *fec_stats) { struct otx2_nic *pfvf = netdev_priv(netdev); struct cgx_fw_data *rsp; otx2_update_lmac_fec_stats(pfvf); /* Report MAC FEC stats */ fec_stats->corrected_blocks.total = pfvf->hw.cgx_fec_corr_blks; fec_stats->uncorrectable_blocks.total = pfvf->hw.cgx_fec_uncorr_blks; rsp = otx2_get_fwdata(pfvf); if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats && !otx2_get_phy_fec_stats(pfvf)) { /* Fetch fwdata again because it's been recently populated with * latest PHY FEC stats. 
*/ rsp = otx2_get_fwdata(pfvf); if (!IS_ERR(rsp)) { struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats; if (pfvf->linfo.fec == OTX2_FEC_BASER) { fec_stats->corrected_blocks.total = p->brfec_corr_blks; fec_stats->uncorrectable_blocks.total = p->brfec_uncorr_blks; } else { fec_stats->corrected_blocks.total = p->rsfec_corr_cws; fec_stats->uncorrectable_blocks.total = p->rsfec_uncorr_cws; } } } } static const struct ethtool_ops otx2_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USE_ADAPTIVE, .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN | ETHTOOL_RING_USE_CQE_SIZE, .get_link = otx2_get_link, .get_drvinfo = otx2_get_drvinfo, .get_strings = otx2_get_strings, .get_ethtool_stats = otx2_get_ethtool_stats, .get_sset_count = otx2_get_sset_count, .set_channels = otx2_set_channels, .get_channels = otx2_get_channels, .get_ringparam = otx2_get_ringparam, .set_ringparam = otx2_set_ringparam, .get_coalesce = otx2_get_coalesce, .set_coalesce = otx2_set_coalesce, .get_rxnfc = otx2_get_rxnfc, .set_rxnfc = otx2_set_rxnfc, .get_rxfh_key_size = otx2_get_rxfh_key_size, .get_rxfh_indir_size = otx2_get_rxfh_indir_size, .get_rxfh = otx2_get_rxfh, .set_rxfh = otx2_set_rxfh, .get_rxfh_context = otx2_get_rxfh_context, .set_rxfh_context = otx2_set_rxfh_context, .get_msglevel = otx2_get_msglevel, .set_msglevel = otx2_set_msglevel, .get_pauseparam = otx2_get_pauseparam, .set_pauseparam = otx2_set_pauseparam, .get_ts_info = otx2_get_ts_info, .get_fec_stats = otx2_get_fec_stats, .get_fecparam = otx2_get_fecparam, .set_fecparam = otx2_set_fecparam, .get_link_ksettings = otx2_get_link_ksettings, .set_link_ksettings = otx2_set_link_ksettings, }; void otx2_set_ethtool_ops(struct net_device *netdev) { netdev->ethtool_ops = &otx2_ethtool_ops; } /* VF's ethtool APIs */ static void otx2vf_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { struct otx2_nic *vf = netdev_priv(netdev); strscpy(info->driver, DRV_VF_NAME, sizeof(info->driver)); strscpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info)); } static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data) { struct otx2_nic *vf = netdev_priv(netdev); int stats; if (sset != ETH_SS_STATS) return; for (stats = 0; stats < otx2_n_dev_stats; stats++) { memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } for (stats = 0; stats < otx2_n_drv_stats; stats++) { memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } otx2_get_qset_strings(vf, &data, 0); strcpy(data, "reset_count"); data += ETH_GSTRING_LEN; } static void otx2vf_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct otx2_nic *vf = netdev_priv(netdev); int stat; otx2_get_dev_stats(vf); for (stat = 0; stat < otx2_n_dev_stats; stat++) *(data++) = ((u64 *)&vf->hw.dev_stats) [otx2_dev_stats[stat].index]; for (stat = 0; stat < otx2_n_drv_stats; stat++) *(data++) = atomic_read(&((atomic_t *)&vf->hw.drv_stats) [otx2_drv_stats[stat].index]); otx2_get_qset_stats(vf, stats, &data); *(data++) = vf->reset_count; } static int otx2vf_get_sset_count(struct net_device *netdev, int sset) { struct otx2_nic *vf = netdev_priv(netdev); int qstats_count; if (sset != ETH_SS_STATS) return -EINVAL; qstats_count = otx2_n_queue_stats * (vf->hw.rx_queues + otx2_get_total_tx_queues(vf)); return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1; } static int otx2vf_get_link_ksettings(struct net_device *netdev, struct 
ethtool_link_ksettings *cmd)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	if (is_otx2_lbkvf(pfvf->pdev)) {
		cmd->base.duplex = DUPLEX_FULL;
		cmd->base.speed = SPEED_100000;
	} else {
		return otx2_get_link_ksettings(netdev, cmd);
	}
	return 0;
}

static const struct ethtool_ops otx2vf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.supported_ring_params	= ETHTOOL_RING_USE_RX_BUF_LEN |
				  ETHTOOL_RING_USE_CQE_SIZE,
	.get_link		= otx2_get_link,
	.get_drvinfo		= otx2vf_get_drvinfo,
	.get_strings		= otx2vf_get_strings,
	.get_ethtool_stats	= otx2vf_get_ethtool_stats,
	.get_sset_count		= otx2vf_get_sset_count,
	.set_channels		= otx2_set_channels,
	.get_channels		= otx2_get_channels,
	.get_rxnfc		= otx2_get_rxnfc,
	.set_rxnfc		= otx2_set_rxnfc,
	.get_rxfh_key_size	= otx2_get_rxfh_key_size,
	.get_rxfh_indir_size	= otx2_get_rxfh_indir_size,
	.get_rxfh		= otx2_get_rxfh,
	.set_rxfh		= otx2_set_rxfh,
	.get_rxfh_context	= otx2_get_rxfh_context,
	.set_rxfh_context	= otx2_set_rxfh_context,
	.get_ringparam		= otx2_get_ringparam,
	.set_ringparam		= otx2_set_ringparam,
	.get_coalesce		= otx2_get_coalesce,
	.set_coalesce		= otx2_set_coalesce,
	.get_msglevel		= otx2_get_msglevel,
	.set_msglevel		= otx2_set_msglevel,
	.get_pauseparam		= otx2_get_pauseparam,
	.set_pauseparam		= otx2_set_pauseparam,
	.get_link_ksettings	= otx2vf_get_link_ksettings,
	.get_ts_info		= otx2_get_ts_info,
};

void otx2vf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &otx2vf_ethtool_ops;
}
EXPORT_SYMBOL(otx2vf_set_ethtool_ops);
linux-master
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
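The otx2_ethtool.c record above snaps user-requested ring sizes to the hardware-supported queue depths: its own comments list permitted lengths of 16, 64, 256, 1K, 4K, 16K, 64K, 256K and 1M entries, say the value is "assigned to the nearest possible exponent", and clamp Tx rings to a 4K minimum because roughly 2000 SQ CQEs must stay unused. The standalone C sketch below only illustrates that rounding idea under those assumptions; snap_ring_size(), the depths[] table and the example values are hypothetical helpers for this illustration and are not part of the driver, which does the same job with its own Q_SIZE()/Q_COUNT() macros.

/*
 * Illustrative sketch only: approximates how otx2_set_ringparam() rounds a
 * requested ring size to one of the supported queue depths quoted in the
 * driver comment. Not driver code; snap_ring_size() and depths[] are
 * hypothetical.
 */
#include <stdio.h>

/* Supported CQ/SQ depths listed in the driver comment */
static const unsigned int depths[] = {
	16, 64, 256, 1024, 4096, 16384, 65536, 262144, 1048576
};
#define NUM_DEPTHS (sizeof(depths) / sizeof(depths[0]))

/* Pick the supported depth closest to the requested entry count */
static unsigned int snap_ring_size(unsigned int requested)
{
	unsigned int best = depths[0];
	size_t i;

	for (i = 0; i < NUM_DEPTHS; i++) {
		unsigned int cur = depths[i];
		unsigned int d_cur = cur > requested ? cur - requested : requested - cur;
		unsigned int d_best = best > requested ? best - requested : requested - best;

		if (d_cur < d_best)
			best = cur;
	}
	return best;
}

int main(void)
{
	/* Tx rings are additionally clamped to a 4K minimum per the comment */
	unsigned int rx_req = 700, tx_req = 900;
	unsigned int tx_min = 4096;

	unsigned int rx = snap_ring_size(rx_req);
	unsigned int tx = snap_ring_size(tx_req < tx_min ? tx_min : tx_req);

	printf("rx %u -> %u, tx %u -> %u\n", rx_req, rx, tx_req, tx);
	return 0;
}

Compiling and running this sketch prints "rx 700 -> 1024, tx 900 -> 4096", matching the nearest-supported-depth behaviour described in the driver's comments.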
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Physical Function ethernet driver * * Copyright (C) 2020 Marvell. * */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/etherdevice.h> #include <linux/of.h> #include <linux/if_vlan.h> #include <linux/iommu.h> #include <net/ip.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <linux/bitfield.h> #include <net/page_pool/types.h> #include "otx2_reg.h" #include "otx2_common.h" #include "otx2_txrx.h" #include "otx2_struct.h" #include "otx2_ptp.h" #include "cn10k.h" #include "qos.h" #include <rvu_trace.h> #define DRV_NAME "rvu_nicpf" #define DRV_STRING "Marvell RVU NIC Physical Function Driver" /* Supported devices */ static const struct pci_device_id otx2_pf_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) }, { 0, } /* end of table */ }; MODULE_AUTHOR("Sunil Goutham <[email protected]>"); MODULE_DESCRIPTION(DRV_STRING); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(pci, otx2_pf_id_table); static void otx2_vf_link_event_task(struct work_struct *work); enum { TYPE_PFAF, TYPE_PFVF, }; static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable); static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable); static int otx2_change_mtu(struct net_device *netdev, int new_mtu) { struct otx2_nic *pf = netdev_priv(netdev); bool if_up = netif_running(netdev); int err = 0; if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) { netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", netdev->mtu); return -EINVAL; } if (if_up) otx2_stop(netdev); netdev_info(netdev, "Changing MTU from %d to %d\n", netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (if_up) err = otx2_open(netdev); return err; } static void otx2_disable_flr_me_intr(struct otx2_nic *pf) { int irq, vfs = pf->total_vfs; /* Disable VFs ME interrupts */ otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs)); irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0); free_irq(irq, pf); /* Disable VFs FLR interrupts */ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs)); irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0); free_irq(irq, pf); if (vfs <= 64) return; otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1); free_irq(irq, pf); otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1); free_irq(irq, pf); } static void otx2_flr_wq_destroy(struct otx2_nic *pf) { if (!pf->flr_wq) return; destroy_workqueue(pf->flr_wq); pf->flr_wq = NULL; devm_kfree(pf->dev, pf->flr_wrk); } static void otx2_flr_handler(struct work_struct *work) { struct flr_work *flrwork = container_of(work, struct flr_work, work); struct otx2_nic *pf = flrwork->pf; struct mbox *mbox = &pf->mbox; struct msg_req *req; int vf, reg = 0; vf = flrwork - pf->flr_wrk; mutex_lock(&mbox->lock); req = otx2_mbox_alloc_msg_vf_flr(mbox); if (!req) { mutex_unlock(&mbox->lock); return; } req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK; req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK; if (!otx2_sync_mbox_msg(&pf->mbox)) { if (vf >= 64) { reg = 1; vf = vf - 64; } /* clear transcation pending bit */ otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf)); otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf)); } mutex_unlock(&mbox->lock); } static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq) { struct otx2_nic *pf = (struct otx2_nic *)pf_irq; int reg, dev, vf, start_vf, 
num_reg = 1; u64 intr; if (pf->total_vfs > 64) num_reg = 2; for (reg = 0; reg < num_reg; reg++) { intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg)); if (!intr) continue; start_vf = 64 * reg; for (vf = 0; vf < 64; vf++) { if (!(intr & BIT_ULL(vf))) continue; dev = vf + start_vf; queue_work(pf->flr_wq, &pf->flr_wrk[dev].work); /* Clear interrupt */ otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf)); /* Disable the interrupt */ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf)); } } return IRQ_HANDLED; } static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq) { struct otx2_nic *pf = (struct otx2_nic *)pf_irq; int vf, reg, num_reg = 1; u64 intr; if (pf->total_vfs > 64) num_reg = 2; for (reg = 0; reg < num_reg; reg++) { intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg)); if (!intr) continue; for (vf = 0; vf < 64; vf++) { if (!(intr & BIT_ULL(vf))) continue; /* clear trpend bit */ otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf)); /* clear interrupt */ otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf)); } } return IRQ_HANDLED; } static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs) { struct otx2_hw *hw = &pf->hw; char *irq_name; int ret; /* Register ME interrupt handler*/ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE]; snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc)); ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0), otx2_pf_me_intr_handler, 0, irq_name, pf); if (ret) { dev_err(pf->dev, "RVUPF: IRQ registration failed for ME0\n"); } /* Register FLR interrupt handler */ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE]; snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc)); ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0), otx2_pf_flr_intr_handler, 0, irq_name, pf); if (ret) { dev_err(pf->dev, "RVUPF: IRQ registration failed for FLR0\n"); return ret; } if (numvfs > 64) { irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE]; snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1", rvu_get_pf(pf->pcifunc)); ret = request_irq(pci_irq_vector (pf->pdev, RVU_PF_INT_VEC_VFME1), otx2_pf_me_intr_handler, 0, irq_name, pf); if (ret) { dev_err(pf->dev, "RVUPF: IRQ registration failed for ME1\n"); } irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE]; snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1", rvu_get_pf(pf->pcifunc)); ret = request_irq(pci_irq_vector (pf->pdev, RVU_PF_INT_VEC_VFFLR1), otx2_pf_flr_intr_handler, 0, irq_name, pf); if (ret) { dev_err(pf->dev, "RVUPF: IRQ registration failed for FLR1\n"); return ret; } } /* Enable ME interrupt for all VFs*/ otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs)); otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs)); /* Enable FLR interrupt for all VFs*/ otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs)); otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs)); if (numvfs > 64) { numvfs -= 64; otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs)); otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(numvfs)); otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs)); otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(numvfs)); } return 0; } static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs) { int vf; pf->flr_wq = alloc_ordered_workqueue("otx2_pf_flr_wq", WQ_HIGHPRI); if (!pf->flr_wq) return -ENOMEM; pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs, sizeof(struct flr_work), GFP_KERNEL); if (!pf->flr_wrk) { destroy_workqueue(pf->flr_wq); return -ENOMEM; } for (vf = 0; vf < num_vfs; 
vf++) { pf->flr_wrk[vf].pf = pf; INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler); } return 0; } static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq, int first, int mdevs, u64 intr, int type) { struct otx2_mbox_dev *mdev; struct otx2_mbox *mbox; struct mbox_hdr *hdr; int i; for (i = first; i < mdevs; i++) { /* start from 0 */ if (!(intr & BIT_ULL(i - first))) continue; mbox = &mw->mbox; mdev = &mbox->dev[i]; if (type == TYPE_PFAF) otx2_sync_mbox_bbuf(mbox, i); hdr = mdev->mbase + mbox->rx_start; /* The hdr->num_msgs is set to zero immediately in the interrupt * handler to ensure that it holds a correct value next time * when the interrupt handler is called. * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler * pf>mbox.up_num_msgs holds the data for use in * pfaf_mbox_up_handler. */ if (hdr->num_msgs) { mw[i].num_msgs = hdr->num_msgs; hdr->num_msgs = 0; if (type == TYPE_PFAF) memset(mbox->hwbase + mbox->rx_start, 0, ALIGN(sizeof(struct mbox_hdr), sizeof(u64))); queue_work(mbox_wq, &mw[i].mbox_wrk); } mbox = &mw->mbox_up; mdev = &mbox->dev[i]; if (type == TYPE_PFAF) otx2_sync_mbox_bbuf(mbox, i); hdr = mdev->mbase + mbox->rx_start; if (hdr->num_msgs) { mw[i].up_num_msgs = hdr->num_msgs; hdr->num_msgs = 0; if (type == TYPE_PFAF) memset(mbox->hwbase + mbox->rx_start, 0, ALIGN(sizeof(struct mbox_hdr), sizeof(u64))); queue_work(mbox_wq, &mw[i].mbox_up_wrk); } } } static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev, struct otx2_mbox *pfvf_mbox, void *bbuf_base, int devid) { struct otx2_mbox_dev *src_mdev = mdev; int offset; /* Msgs are already copied, trigger VF's mbox irq */ smp_wmb(); offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift); writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset); /* Restore VF's mbox bounce buffer region address */ src_mdev->mbase = bbuf_base; } static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf, struct otx2_mbox *src_mbox, int dir, int vf, int num_msgs) { struct otx2_mbox_dev *src_mdev, *dst_mdev; struct mbox_hdr *mbox_hdr; struct mbox_hdr *req_hdr; struct mbox *dst_mbox; int dst_size, err; if (dir == MBOX_DIR_PFAF) { /* Set VF's mailbox memory as PF's bounce buffer memory, so * that explicit copying of VF's msgs to PF=>AF mbox region * and AF=>PF responses to VF's mbox region can be avoided. */ src_mdev = &src_mbox->dev[vf]; mbox_hdr = src_mbox->hwbase + src_mbox->rx_start + (vf * MBOX_SIZE); dst_mbox = &pf->mbox; dst_size = dst_mbox->mbox.tx_size - ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN); /* Check if msgs fit into destination area and has valid size */ if (mbox_hdr->msg_size > dst_size || !mbox_hdr->msg_size) return -EINVAL; dst_mdev = &dst_mbox->mbox.dev[0]; mutex_lock(&pf->mbox.lock); dst_mdev->mbase = src_mdev->mbase; dst_mdev->msg_size = mbox_hdr->msg_size; dst_mdev->num_msgs = num_msgs; err = otx2_sync_mbox_msg(dst_mbox); /* Error code -EIO indicate there is a communication failure * to the AF. Rest of the error codes indicate that AF processed * VF messages and set the error codes in response messages * (if any) so simply forward responses to VF. */ if (err == -EIO) { dev_warn(pf->dev, "AF not responding to VF%d messages\n", vf); /* restore PF mbase and exit */ dst_mdev->mbase = pf->mbox.bbuf_base; mutex_unlock(&pf->mbox.lock); return err; } /* At this point, all the VF messages sent to AF are acked * with proper responses and responses are copied to VF * mailbox hence raise interrupt to VF. 
*/ req_hdr = (struct mbox_hdr *)(dst_mdev->mbase + dst_mbox->mbox.rx_start); req_hdr->num_msgs = num_msgs; otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox, pf->mbox.bbuf_base, vf); mutex_unlock(&pf->mbox.lock); } else if (dir == MBOX_DIR_PFVF_UP) { src_mdev = &src_mbox->dev[0]; mbox_hdr = src_mbox->hwbase + src_mbox->rx_start; req_hdr = (struct mbox_hdr *)(src_mdev->mbase + src_mbox->rx_start); req_hdr->num_msgs = num_msgs; dst_mbox = &pf->mbox_pfvf[0]; dst_size = dst_mbox->mbox_up.tx_size - ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN); /* Check if msgs fit into destination area */ if (mbox_hdr->msg_size > dst_size) return -EINVAL; dst_mdev = &dst_mbox->mbox_up.dev[vf]; dst_mdev->mbase = src_mdev->mbase; dst_mdev->msg_size = mbox_hdr->msg_size; dst_mdev->num_msgs = mbox_hdr->num_msgs; err = otx2_sync_mbox_up_msg(dst_mbox, vf); if (err) { dev_warn(pf->dev, "VF%d is not responding to mailbox\n", vf); return err; } } else if (dir == MBOX_DIR_VFPF_UP) { req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase + src_mbox->rx_start); req_hdr->num_msgs = num_msgs; otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf], &pf->mbox.mbox_up, pf->mbox_pfvf[vf].bbuf_base, 0); } return 0; } static void otx2_pfvf_mbox_handler(struct work_struct *work) { struct mbox_msghdr *msg = NULL; int offset, vf_idx, id, err; struct otx2_mbox_dev *mdev; struct mbox_hdr *req_hdr; struct otx2_mbox *mbox; struct mbox *vf_mbox; struct otx2_nic *pf; vf_mbox = container_of(work, struct mbox, mbox_wrk); pf = vf_mbox->pfvf; vf_idx = vf_mbox - pf->mbox_pfvf; mbox = &pf->mbox_pfvf[0].mbox; mdev = &mbox->dev[vf_idx]; req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN); for (id = 0; id < vf_mbox->num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start + offset); if (msg->sig != OTX2_MBOX_REQ_SIG) goto inval_msg; /* Set VF's number in each of the msg */ msg->pcifunc &= RVU_PFVF_FUNC_MASK; msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK; offset = msg->next_msgoff; } err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx, vf_mbox->num_msgs); if (err) goto inval_msg; return; inval_msg: otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id); otx2_mbox_msg_send(mbox, vf_idx); } static void otx2_pfvf_mbox_up_handler(struct work_struct *work) { struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk); struct otx2_nic *pf = vf_mbox->pfvf; struct otx2_mbox_dev *mdev; int offset, id, vf_idx = 0; struct mbox_hdr *rsp_hdr; struct mbox_msghdr *msg; struct otx2_mbox *mbox; vf_idx = vf_mbox - pf->mbox_pfvf; mbox = &pf->mbox_pfvf[0].mbox_up; mdev = &mbox->dev[vf_idx]; rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); for (id = 0; id < vf_mbox->up_num_msgs; id++) { msg = mdev->mbase + offset; if (msg->id >= MBOX_MSG_MAX) { dev_err(pf->dev, "Mbox msg with unknown ID 0x%x\n", msg->id); goto end; } if (msg->sig != OTX2_MBOX_RSP_SIG) { dev_err(pf->dev, "Mbox msg with wrong signature %x, ID 0x%x\n", msg->sig, msg->id); goto end; } switch (msg->id) { case MBOX_MSG_CGX_LINK_EVENT: break; default: if (msg->rc) dev_err(pf->dev, "Mbox msg response has err %d, ID 0x%x\n", msg->rc, msg->id); break; } end: offset = mbox->rx_start + msg->next_msgoff; if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1)) __otx2_mbox_reset(mbox, 0); mdev->msgs_acked++; } } static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) { struct otx2_nic *pf = (struct otx2_nic *)(pf_irq); int vfs = 
pf->total_vfs; struct mbox *mbox; u64 intr; mbox = pf->mbox_pfvf; /* Handle VF interrupts */ if (vfs > 64) { intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1)); otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr); otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr, TYPE_PFVF); vfs -= 64; } intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0)); otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr); otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF); trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr); return IRQ_HANDLED; } static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs) { void __iomem *hwbase; struct mbox *mbox; int err, vf; u64 base; if (!numvfs) return -EINVAL; pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs, sizeof(struct mbox), GFP_KERNEL); if (!pf->mbox_pfvf) return -ENOMEM; pf->mbox_pfvf_wq = alloc_ordered_workqueue("otx2_pfvf_mailbox", WQ_HIGHPRI | WQ_MEM_RECLAIM); if (!pf->mbox_pfvf_wq) return -ENOMEM; /* On CN10K platform, PF <-> VF mailbox region follows after * PF <-> AF mailbox region. */ if (test_bit(CN10K_MBOX, &pf->hw.cap_flag)) base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) + MBOX_SIZE; else base = readq((void __iomem *)((u64)pf->reg_base + RVU_PF_VF_BAR4_ADDR)); hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs); if (!hwbase) { err = -ENOMEM; goto free_wq; } mbox = &pf->mbox_pfvf[0]; err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base, MBOX_DIR_PFVF, numvfs); if (err) goto free_iomem; err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base, MBOX_DIR_PFVF_UP, numvfs); if (err) goto free_iomem; for (vf = 0; vf < numvfs; vf++) { mbox->pfvf = pf; INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler); INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler); mbox++; } return 0; free_iomem: if (hwbase) iounmap(hwbase); free_wq: destroy_workqueue(pf->mbox_pfvf_wq); return err; } static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf) { struct mbox *mbox = &pf->mbox_pfvf[0]; if (!mbox) return; if (pf->mbox_pfvf_wq) { destroy_workqueue(pf->mbox_pfvf_wq); pf->mbox_pfvf_wq = NULL; } if (mbox->mbox.hwbase) iounmap(mbox->mbox.hwbase); otx2_mbox_destroy(&mbox->mbox); } static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) { /* Clear PF <=> VF mailbox IRQ */ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull); otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull); /* Enable PF <=> VF mailbox IRQ */ otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs)); if (numvfs > 64) { numvfs -= 64; otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1), INTR_MASK(numvfs)); } } static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) { int vector; /* Disable PF <=> VF mailbox IRQ */ otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull); otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull); otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull); vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0); free_irq(vector, pf); if (numvfs > 64) { otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull); vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1); free_irq(vector, pf); } } static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) { struct otx2_hw *hw = &pf->hw; char *irq_name; int err; /* Register MBOX0 interrupt handler */ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE]; if (pf->pcifunc) snprintf(irq_name, NAME_SIZE, "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc)); else snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0"); err = request_irq(pci_irq_vector(pf->pdev, 
RVU_PF_INT_VEC_VFPF_MBOX0), otx2_pfvf_mbox_intr_handler, 0, irq_name, pf); if (err) { dev_err(pf->dev, "RVUPF: IRQ registration failed for PFVF mbox0 irq\n"); return err; } if (numvfs > 64) { /* Register MBOX1 interrupt handler */ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE]; if (pf->pcifunc) snprintf(irq_name, NAME_SIZE, "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc)); else snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1"); err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1), otx2_pfvf_mbox_intr_handler, 0, irq_name, pf); if (err) { dev_err(pf->dev, "RVUPF: IRQ registration failed for PFVF mbox1 irq\n"); return err; } } otx2_enable_pfvf_mbox_intr(pf, numvfs); return 0; } static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf, struct mbox_msghdr *msg) { int devid; if (msg->id >= MBOX_MSG_MAX) { dev_err(pf->dev, "Mbox msg with unknown ID 0x%x\n", msg->id); return; } if (msg->sig != OTX2_MBOX_RSP_SIG) { dev_err(pf->dev, "Mbox msg with wrong signature %x, ID 0x%x\n", msg->sig, msg->id); return; } /* message response heading VF */ devid = msg->pcifunc & RVU_PFVF_FUNC_MASK; if (devid) { struct otx2_vf_config *config = &pf->vf_configs[devid - 1]; struct delayed_work *dwork; switch (msg->id) { case MBOX_MSG_NIX_LF_START_RX: config->intf_down = false; dwork = &config->link_event_work; schedule_delayed_work(dwork, msecs_to_jiffies(100)); break; case MBOX_MSG_NIX_LF_STOP_RX: config->intf_down = true; break; } return; } switch (msg->id) { case MBOX_MSG_READY: pf->pcifunc = msg->pcifunc; break; case MBOX_MSG_MSIX_OFFSET: mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg); break; case MBOX_MSG_NPA_LF_ALLOC: mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg); break; case MBOX_MSG_NIX_LF_ALLOC: mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg); break; case MBOX_MSG_NIX_BP_ENABLE: mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg); break; case MBOX_MSG_CGX_STATS: mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg); break; case MBOX_MSG_CGX_FEC_STATS: mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg); break; default: if (msg->rc) dev_err(pf->dev, "Mbox msg response has err %d, ID 0x%x\n", msg->rc, msg->id); break; } } static void otx2_pfaf_mbox_handler(struct work_struct *work) { struct otx2_mbox_dev *mdev; struct mbox_hdr *rsp_hdr; struct mbox_msghdr *msg; struct otx2_mbox *mbox; struct mbox *af_mbox; struct otx2_nic *pf; int offset, id; af_mbox = container_of(work, struct mbox, mbox_wrk); mbox = &af_mbox->mbox; mdev = &mbox->dev[0]; rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); pf = af_mbox->pfvf; for (id = 0; id < af_mbox->num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + offset); otx2_process_pfaf_mbox_msg(pf, msg); offset = mbox->rx_start + msg->next_msgoff; if (mdev->msgs_acked == (af_mbox->num_msgs - 1)) __otx2_mbox_reset(mbox, 0); mdev->msgs_acked++; } } static void otx2_handle_link_event(struct otx2_nic *pf) { struct cgx_link_user_info *linfo = &pf->linfo; struct net_device *netdev = pf->netdev; pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name, linfo->link_up ? "UP" : "DOWN", linfo->speed, linfo->full_duplex ? 
"Full" : "Half"); if (linfo->link_up) { netif_carrier_on(netdev); netif_tx_start_all_queues(netdev); } else { netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); } } int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf, struct mcs_intr_info *event, struct msg_rsp *rsp) { cn10k_handle_mcs_event(pf, event); return 0; } int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf, struct cgx_link_info_msg *msg, struct msg_rsp *rsp) { int i; /* Copy the link info sent by AF */ pf->linfo = msg->link_info; /* notify VFs about link event */ for (i = 0; i < pci_num_vf(pf->pdev); i++) { struct otx2_vf_config *config = &pf->vf_configs[i]; struct delayed_work *dwork = &config->link_event_work; if (config->intf_down) continue; schedule_delayed_work(dwork, msecs_to_jiffies(100)); } /* interface has not been fully configured yet */ if (pf->flags & OTX2_FLAG_INTF_DOWN) return 0; otx2_handle_link_event(pf); return 0; } static int otx2_process_mbox_msg_up(struct otx2_nic *pf, struct mbox_msghdr *req) { /* Check if valid, if not reply with a invalid msg */ if (req->sig != OTX2_MBOX_REQ_SIG) { otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id); return -ENODEV; } switch (req->id) { #define M(_name, _id, _fn_name, _req_type, _rsp_type) \ case _id: { \ struct _rsp_type *rsp; \ int err; \ \ rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \ &pf->mbox.mbox_up, 0, \ sizeof(struct _rsp_type)); \ if (!rsp) \ return -ENOMEM; \ \ rsp->hdr.id = _id; \ rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \ rsp->hdr.pcifunc = 0; \ rsp->hdr.rc = 0; \ \ err = otx2_mbox_up_handler_ ## _fn_name( \ pf, (struct _req_type *)req, rsp); \ return err; \ } MBOX_UP_CGX_MESSAGES MBOX_UP_MCS_MESSAGES #undef M break; default: otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id); return -ENODEV; } return 0; } static void otx2_pfaf_mbox_up_handler(struct work_struct *work) { struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk); struct otx2_mbox *mbox = &af_mbox->mbox_up; struct otx2_mbox_dev *mdev = &mbox->dev[0]; struct otx2_nic *pf = af_mbox->pfvf; int offset, id, devid = 0; struct mbox_hdr *rsp_hdr; struct mbox_msghdr *msg; rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); for (id = 0; id < af_mbox->up_num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + offset); devid = msg->pcifunc & RVU_PFVF_FUNC_MASK; /* Skip processing VF's messages */ if (!devid) otx2_process_mbox_msg_up(pf, msg); offset = mbox->rx_start + msg->next_msgoff; } if (devid) { otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up, MBOX_DIR_PFVF_UP, devid - 1, af_mbox->up_num_msgs); return; } otx2_mbox_msg_send(mbox, 0); } static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) { struct otx2_nic *pf = (struct otx2_nic *)pf_irq; struct mbox *mbox; /* Clear the IRQ */ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0)); mbox = &pf->mbox; trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0)); otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF); return IRQ_HANDLED; } static void otx2_disable_mbox_intr(struct otx2_nic *pf) { int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX); /* Disable AF => PF mailbox IRQ */ otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0)); free_irq(vector, pf); } static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af) { struct otx2_hw *hw = &pf->hw; struct msg_req *req; char *irq_name; int err; /* Register mailbox interrupt handler */ irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE]; 
snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox"); err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX), otx2_pfaf_mbox_intr_handler, 0, irq_name, pf); if (err) { dev_err(pf->dev, "RVUPF: IRQ registration failed for PFAF mbox irq\n"); return err; } /* Enable mailbox interrupt for msgs coming from AF. * First clear to avoid spurious interrupts, if any. */ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0)); otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0)); if (!probe_af) return 0; /* Check mailbox communication with AF */ req = otx2_mbox_alloc_msg_ready(&pf->mbox); if (!req) { otx2_disable_mbox_intr(pf); return -ENOMEM; } err = otx2_sync_mbox_msg(&pf->mbox); if (err) { dev_warn(pf->dev, "AF not responding to mailbox, deferring probe\n"); otx2_disable_mbox_intr(pf); return -EPROBE_DEFER; } return 0; } static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf) { struct mbox *mbox = &pf->mbox; if (pf->mbox_wq) { destroy_workqueue(pf->mbox_wq); pf->mbox_wq = NULL; } if (mbox->mbox.hwbase) iounmap((void __iomem *)mbox->mbox.hwbase); otx2_mbox_destroy(&mbox->mbox); otx2_mbox_destroy(&mbox->mbox_up); } static int otx2_pfaf_mbox_init(struct otx2_nic *pf) { struct mbox *mbox = &pf->mbox; void __iomem *hwbase; int err; mbox->pfvf = pf; pf->mbox_wq = alloc_ordered_workqueue("otx2_pfaf_mailbox", WQ_HIGHPRI | WQ_MEM_RECLAIM); if (!pf->mbox_wq) return -ENOMEM; /* Mailbox is a reserved memory (in RAM) region shared between * admin function (i.e AF) and this PF, shouldn't be mapped as * device memory to allow unaligned accesses. */ hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM), MBOX_SIZE); if (!hwbase) { dev_err(pf->dev, "Unable to map PFAF mailbox region\n"); err = -ENOMEM; goto exit; } err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base, MBOX_DIR_PFAF, 1); if (err) goto exit; err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base, MBOX_DIR_PFAF_UP, 1); if (err) goto exit; err = otx2_mbox_bbuf_init(mbox, pf->pdev); if (err) goto exit; INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler); INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler); mutex_init(&mbox->lock); return 0; exit: otx2_pfaf_mbox_destroy(pf); return err; } static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable) { struct msg_req *msg; int err; mutex_lock(&pf->mbox.lock); if (enable) msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox); else msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox); if (!msg) { mutex_unlock(&pf->mbox.lock); return -ENOMEM; } err = otx2_sync_mbox_msg(&pf->mbox); mutex_unlock(&pf->mbox.lock); return err; } static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable) { struct msg_req *msg; int err; if (enable && !bitmap_empty(pf->flow_cfg->dmacflt_bmap, pf->flow_cfg->dmacflt_max_flows)) netdev_warn(pf->netdev, "CGX/RPM internal loopback might not work as DMAC filters are active\n"); mutex_lock(&pf->mbox.lock); if (enable) msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox); else msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox); if (!msg) { mutex_unlock(&pf->mbox.lock); return -ENOMEM; } err = otx2_sync_mbox_msg(&pf->mbox); mutex_unlock(&pf->mbox.lock); return err; } int otx2_set_real_num_queues(struct net_device *netdev, int tx_queues, int rx_queues) { int err; err = netif_set_real_num_tx_queues(netdev, tx_queues); if (err) { netdev_err(netdev, "Failed to set no of Tx queues: %d\n", tx_queues); return err; } err = netif_set_real_num_rx_queues(netdev, rx_queues); if (err) netdev_err(netdev, "Failed to set no of Rx queues: 
%d\n", rx_queues); return err; } EXPORT_SYMBOL(otx2_set_real_num_queues); static char *nix_sqoperr_e_str[NIX_SQOPERR_MAX] = { "NIX_SQOPERR_OOR", "NIX_SQOPERR_CTX_FAULT", "NIX_SQOPERR_CTX_POISON", "NIX_SQOPERR_DISABLED", "NIX_SQOPERR_SIZE_ERR", "NIX_SQOPERR_OFLOW", "NIX_SQOPERR_SQB_NULL", "NIX_SQOPERR_SQB_FAULT", "NIX_SQOPERR_SQE_SZ_ZERO", }; static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = { "NIX_MNQERR_SQ_CTX_FAULT", "NIX_MNQERR_SQ_CTX_POISON", "NIX_MNQERR_SQB_FAULT", "NIX_MNQERR_SQB_POISON", "NIX_MNQERR_TOTAL_ERR", "NIX_MNQERR_LSO_ERR", "NIX_MNQERR_CQ_QUERY_ERR", "NIX_MNQERR_MAX_SQE_SIZE_ERR", "NIX_MNQERR_MAXLEN_ERR", "NIX_MNQERR_SQE_SIZEM1_ZERO", }; static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] = { "NIX_SND_STATUS_GOOD", "NIX_SND_STATUS_SQ_CTX_FAULT", "NIX_SND_STATUS_SQ_CTX_POISON", "NIX_SND_STATUS_SQB_FAULT", "NIX_SND_STATUS_SQB_POISON", "NIX_SND_STATUS_HDR_ERR", "NIX_SND_STATUS_EXT_ERR", "NIX_SND_STATUS_JUMP_FAULT", "NIX_SND_STATUS_JUMP_POISON", "NIX_SND_STATUS_CRC_ERR", "NIX_SND_STATUS_IMM_ERR", "NIX_SND_STATUS_SG_ERR", "NIX_SND_STATUS_MEM_ERR", "NIX_SND_STATUS_INVALID_SUBDC", "NIX_SND_STATUS_SUBDC_ORDER_ERR", "NIX_SND_STATUS_DATA_FAULT", "NIX_SND_STATUS_DATA_POISON", "NIX_SND_STATUS_NPC_DROP_ACTION", "NIX_SND_STATUS_LOCK_VIOL", "NIX_SND_STATUS_NPC_UCAST_CHAN_ERR", "NIX_SND_STATUS_NPC_MCAST_CHAN_ERR", "NIX_SND_STATUS_NPC_MCAST_ABORT", "NIX_SND_STATUS_NPC_VTAG_PTR_ERR", "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR", "NIX_SND_STATUS_SEND_STATS_ERR", }; static irqreturn_t otx2_q_intr_handler(int irq, void *data) { struct otx2_nic *pf = data; struct otx2_snd_queue *sq; u64 val, *ptr; u64 qidx = 0; /* CQ */ for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) { ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT); val = otx2_atomic64_add((qidx << 44), ptr); otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) | (val & NIX_CQERRINT_BITS)); if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42)))) continue; if (val & BIT_ULL(42)) { netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", qidx, otx2_read64(pf, NIX_LF_ERR_INT)); } else { if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR)) netdev_err(pf->netdev, "CQ%lld: Doorbell error", qidx); if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT)) netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM", qidx); } schedule_work(&pf->reset_task); } /* SQ */ for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) { u64 sq_op_err_dbg, mnq_err_dbg, snd_err_dbg; u8 sq_op_err_code, mnq_err_code, snd_err_code; sq = &pf->qset.sq[qidx]; if (!sq->sqb_ptrs) continue; /* Below debug registers captures first errors corresponding to * those registers. We don't have to check against SQ qid as * these are fatal errors. */ ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT); val = otx2_atomic64_add((qidx << 44), ptr); otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) | (val & NIX_SQINT_BITS)); if (val & BIT_ULL(42)) { netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", qidx, otx2_read64(pf, NIX_LF_ERR_INT)); goto done; } sq_op_err_dbg = otx2_read64(pf, NIX_LF_SQ_OP_ERR_DBG); if (!(sq_op_err_dbg & BIT(44))) goto chk_mnq_err_dbg; sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg); netdev_err(pf->netdev, "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(%llx) err=%s\n", qidx, sq_op_err_dbg, nix_sqoperr_e_str[sq_op_err_code]); otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44)); if (sq_op_err_code == NIX_SQOPERR_SQB_NULL) goto chk_mnq_err_dbg; /* Err is not NIX_SQOPERR_SQB_NULL, call aq function to read SQ structure. * TODO: But we are in irq context. 
How to call mbox functions which does sleep */ chk_mnq_err_dbg: mnq_err_dbg = otx2_read64(pf, NIX_LF_MNQ_ERR_DBG); if (!(mnq_err_dbg & BIT(44))) goto chk_snd_err_dbg; mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg); netdev_err(pf->netdev, "SQ%lld: NIX_LF_MNQ_ERR_DBG(%llx) err=%s\n", qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code]); otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44)); chk_snd_err_dbg: snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG); if (snd_err_dbg & BIT(44)) { snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg); netdev_err(pf->netdev, "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s\n", qidx, snd_err_dbg, nix_snd_status_e_str[snd_err_code]); otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44)); } done: /* Print values and reset */ if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) netdev_err(pf->netdev, "SQ%lld: SQB allocation failed", qidx); schedule_work(&pf->reset_task); } return IRQ_HANDLED; } static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq) { struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq; struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev; int qidx = cq_poll->cint_idx; /* Disable interrupts. * * Completion interrupts behave in a level-triggered interrupt * fashion, and hence have to be cleared only after it is serviced. */ otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0)); /* Schedule NAPI */ pf->napi_events++; napi_schedule_irqoff(&cq_poll->napi); return IRQ_HANDLED; } static void otx2_disable_napi(struct otx2_nic *pf) { struct otx2_qset *qset = &pf->qset; struct otx2_cq_poll *cq_poll; int qidx; for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { cq_poll = &qset->napi[qidx]; cancel_work_sync(&cq_poll->dim.work); napi_disable(&cq_poll->napi); netif_napi_del(&cq_poll->napi); } } static void otx2_free_cq_res(struct otx2_nic *pf) { struct otx2_qset *qset = &pf->qset; struct otx2_cq_queue *cq; int qidx; /* Disable CQs */ otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false); for (qidx = 0; qidx < qset->cq_cnt; qidx++) { cq = &qset->cq[qidx]; qmem_free(pf->dev, cq->cqe); } } static void otx2_free_sq_res(struct otx2_nic *pf) { struct otx2_qset *qset = &pf->qset; struct otx2_snd_queue *sq; int qidx; /* Disable SQs */ otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false); /* Free SQB pointers */ otx2_sq_free_sqbs(pf); for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) { sq = &qset->sq[qidx]; /* Skip freeing Qos queues if they are not initialized */ if (!sq->sqe) continue; qmem_free(pf->dev, sq->sqe); qmem_free(pf->dev, sq->tso_hdrs); kfree(sq->sg); kfree(sq->sqb_ptrs); } } static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu) { int frame_size; int total_size; int rbuf_size; if (pf->hw.rbuf_len) return ALIGN(pf->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM; /* The data transferred by NIX to memory consists of actual packet * plus additional data which has timestamp and/or EDSA/HIGIG2 * headers if interface is configured in corresponding modes. * NIX transfers entire data using 6 segments/buffers and writes * a CQE_RX descriptor with those segment addresses. First segment * has additional data prepended to packet. Also software omits a * headroom of 128 bytes in each segment. 
Hence the total size of * memory needed to receive a packet with 'mtu' is: * frame size = mtu + additional data; * memory = frame_size + headroom * 6; * each receive buffer size = memory / 6; */ frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; total_size = frame_size + OTX2_HEAD_ROOM * 6; rbuf_size = total_size / 6; return ALIGN(rbuf_size, 2048); } static int otx2_init_hw_resources(struct otx2_nic *pf) { struct nix_lf_free_req *free_req; struct mbox *mbox = &pf->mbox; struct otx2_hw *hw = &pf->hw; struct msg_req *req; int err = 0, lvl; /* Set required NPA LF's pool counts * Auras and Pools are used in a 1:1 mapping, * so, aura count = pool count. */ hw->rqpool_cnt = hw->rx_queues; hw->sqpool_cnt = otx2_get_total_tx_queues(pf); hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt; /* Maximum hardware supported transmit length */ pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN; pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu); mutex_lock(&mbox->lock); /* NPA init */ err = otx2_config_npa(pf); if (err) goto exit; /* NIX init */ err = otx2_config_nix(pf); if (err) goto err_free_npa_lf; /* Enable backpressure for CGX mapped PF/VFs */ if (!is_otx2_lbkvf(pf->pdev)) otx2_nix_config_bp(pf, true); /* Init Auras and pools used by NIX RQ, for free buffer ptrs */ err = otx2_rq_aura_pool_init(pf); if (err) { mutex_unlock(&mbox->lock); goto err_free_nix_lf; } /* Init Auras and pools used by NIX SQ, for queueing SQEs */ err = otx2_sq_aura_pool_init(pf); if (err) { mutex_unlock(&mbox->lock); goto err_free_rq_ptrs; } err = otx2_txsch_alloc(pf); if (err) { mutex_unlock(&mbox->lock); goto err_free_sq_ptrs; } #ifdef CONFIG_DCB if (pf->pfc_en) { err = otx2_pfc_txschq_alloc(pf); if (err) { mutex_unlock(&mbox->lock); goto err_free_sq_ptrs; } } #endif err = otx2_config_nix_queues(pf); if (err) { mutex_unlock(&mbox->lock); goto err_free_txsch; } for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { err = otx2_txschq_config(pf, lvl, 0, false); if (err) { mutex_unlock(&mbox->lock); goto err_free_nix_queues; } } #ifdef CONFIG_DCB if (pf->pfc_en) { err = otx2_pfc_txschq_config(pf); if (err) { mutex_unlock(&mbox->lock); goto err_free_nix_queues; } } #endif mutex_unlock(&mbox->lock); return err; err_free_nix_queues: otx2_free_sq_res(pf); otx2_free_cq_res(pf); otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false); err_free_txsch: otx2_txschq_stop(pf); err_free_sq_ptrs: otx2_sq_free_sqbs(pf); err_free_rq_ptrs: otx2_free_aura_ptr(pf, AURA_NIX_RQ); otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true); otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true); otx2_aura_pool_free(pf); err_free_nix_lf: mutex_lock(&mbox->lock); free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox); if (free_req) { free_req->flags = NIX_LF_DISABLE_FLOWS; if (otx2_sync_mbox_msg(mbox)) dev_err(pf->dev, "%s failed to free nixlf\n", __func__); } err_free_npa_lf: /* Reset NPA LF */ req = otx2_mbox_alloc_msg_npa_lf_free(mbox); if (req) { if (otx2_sync_mbox_msg(mbox)) dev_err(pf->dev, "%s failed to free npalf\n", __func__); } exit: mutex_unlock(&mbox->lock); return err; } static void otx2_free_hw_resources(struct otx2_nic *pf) { struct otx2_qset *qset = &pf->qset; struct nix_lf_free_req *free_req; struct mbox *mbox = &pf->mbox; struct otx2_cq_queue *cq; struct otx2_pool *pool; struct msg_req *req; int pool_id; int qidx; /* Ensure all SQE are processed */ otx2_sqb_flush(pf); /* Stop transmission */ otx2_txschq_stop(pf); #ifdef CONFIG_DCB if (pf->pfc_en) otx2_pfc_txschq_stop(pf); #endif otx2_clean_qos_queues(pf); mutex_lock(&mbox->lock); /* Disable backpressure */ if 
(!(pf->pcifunc & RVU_PFVF_FUNC_MASK)) otx2_nix_config_bp(pf, false); mutex_unlock(&mbox->lock); /* Disable RQs */ otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false); /*Dequeue all CQEs */ for (qidx = 0; qidx < qset->cq_cnt; qidx++) { cq = &qset->cq[qidx]; if (cq->cq_type == CQ_RX) otx2_cleanup_rx_cqes(pf, cq, qidx); else otx2_cleanup_tx_cqes(pf, cq); } otx2_free_sq_res(pf); /* Free RQ buffer pointers*/ otx2_free_aura_ptr(pf, AURA_NIX_RQ); for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) { pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx); pool = &pf->qset.pool[pool_id]; page_pool_destroy(pool->page_pool); pool->page_pool = NULL; } otx2_free_cq_res(pf); /* Free all ingress bandwidth profiles allocated */ cn10k_free_all_ipolicers(pf); mutex_lock(&mbox->lock); /* Reset NIX LF */ free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox); if (free_req) { free_req->flags = NIX_LF_DISABLE_FLOWS; if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN)) free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG; if (otx2_sync_mbox_msg(mbox)) dev_err(pf->dev, "%s failed to free nixlf\n", __func__); } mutex_unlock(&mbox->lock); /* Disable NPA Pool and Aura hw context */ otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true); otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true); otx2_aura_pool_free(pf); mutex_lock(&mbox->lock); /* Reset NPA LF */ req = otx2_mbox_alloc_msg_npa_lf_free(mbox); if (req) { if (otx2_sync_mbox_msg(mbox)) dev_err(pf->dev, "%s failed to free npalf\n", __func__); } mutex_unlock(&mbox->lock); } static void otx2_do_set_rx_mode(struct otx2_nic *pf) { struct net_device *netdev = pf->netdev; struct nix_rx_mode *req; bool promisc = false; if (!(netdev->flags & IFF_UP)) return; if ((netdev->flags & IFF_PROMISC) || (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) { promisc = true; } /* Write unicast address to mcam entries or del from mcam */ if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT) __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter); mutex_lock(&pf->mbox.lock); req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox); if (!req) { mutex_unlock(&pf->mbox.lock); return; } req->mode = NIX_RX_MODE_UCAST; if (promisc) req->mode |= NIX_RX_MODE_PROMISC; if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST)) req->mode |= NIX_RX_MODE_ALLMULTI; req->mode |= NIX_RX_MODE_USE_MCE; otx2_sync_mbox_msg(&pf->mbox); mutex_unlock(&pf->mbox.lock); } static void otx2_dim_work(struct work_struct *w) { struct dim_cq_moder cur_moder; struct otx2_cq_poll *cq_poll; struct otx2_nic *pfvf; struct dim *dim; dim = container_of(w, struct dim, work); cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); cq_poll = container_of(dim, struct otx2_cq_poll, dim); pfvf = (struct otx2_nic *)cq_poll->dev; pfvf->hw.cq_time_wait = (cur_moder.usec > CQ_TIMER_THRESH_MAX) ? CQ_TIMER_THRESH_MAX : cur_moder.usec; pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ? NAPI_POLL_WEIGHT : cur_moder.pkts; dim->state = DIM_START_MEASURE; } int otx2_open(struct net_device *netdev) { struct otx2_nic *pf = netdev_priv(netdev); struct otx2_cq_poll *cq_poll = NULL; struct otx2_qset *qset = &pf->qset; int err = 0, qidx, vec; char *irq_name; netif_carrier_off(netdev); /* RQ and SQs are mapped to different CQs, * so find out max CQ IRQs (i.e CINTs) needed. */ pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues, pf->hw.tc_tx_queues); pf->qset.cq_cnt = pf->hw.rx_queues + otx2_get_total_tx_queues(pf); qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL); if (!qset->napi) return -ENOMEM; /* CQ size of RQ */ qset->rqe_cnt = qset->rqe_cnt ? 
qset->rqe_cnt : Q_COUNT(Q_SIZE_256); /* CQ size of SQ */ qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K); err = -ENOMEM; qset->cq = kcalloc(pf->qset.cq_cnt, sizeof(struct otx2_cq_queue), GFP_KERNEL); if (!qset->cq) goto err_free_mem; qset->sq = kcalloc(otx2_get_total_tx_queues(pf), sizeof(struct otx2_snd_queue), GFP_KERNEL); if (!qset->sq) goto err_free_mem; qset->rq = kcalloc(pf->hw.rx_queues, sizeof(struct otx2_rcv_queue), GFP_KERNEL); if (!qset->rq) goto err_free_mem; err = otx2_init_hw_resources(pf); if (err) goto err_free_mem; /* Register NAPI handler */ for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { cq_poll = &qset->napi[qidx]; cq_poll->cint_idx = qidx; /* RQ0 & SQ0 are mapped to CINT0 and so on.. * 'cq_ids[0]' points to RQ's CQ and * 'cq_ids[1]' points to SQ's CQ and * 'cq_ids[2]' points to XDP's CQ and */ cq_poll->cq_ids[CQ_RX] = (qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ; cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ? qidx + pf->hw.rx_queues : CINT_INVALID_CQ; if (pf->xdp_prog) cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ? (qidx + pf->hw.rx_queues + pf->hw.tx_queues) : CINT_INVALID_CQ; else cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ; cq_poll->cq_ids[CQ_QOS] = (qidx < pf->hw.tc_tx_queues) ? (qidx + pf->hw.rx_queues + pf->hw.non_qos_queues) : CINT_INVALID_CQ; cq_poll->dev = (void *)pf; cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE; INIT_WORK(&cq_poll->dim.work, otx2_dim_work); netif_napi_add(netdev, &cq_poll->napi, otx2_napi_handler); napi_enable(&cq_poll->napi); } /* Set maximum frame size allowed in HW */ err = otx2_hw_set_mtu(pf, netdev->mtu); if (err) goto err_disable_napi; /* Setup segmentation algorithms, if failed, clear offload capability */ otx2_setup_segmentation(pf); /* Initialize RSS */ err = otx2_rss_init(pf); if (err) goto err_disable_napi; /* Register Queue IRQ handlers */ vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START; irq_name = &pf->hw.irq_name[vec * NAME_SIZE]; snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name); err = request_irq(pci_irq_vector(pf->pdev, vec), otx2_q_intr_handler, 0, irq_name, pf); if (err) { dev_err(pf->dev, "RVUPF%d: IRQ registration failed for QERR\n", rvu_get_pf(pf->pcifunc)); goto err_disable_napi; } /* Enable QINT IRQ */ otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0)); /* Register CQ IRQ handlers */ vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START; for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { irq_name = &pf->hw.irq_name[vec * NAME_SIZE]; snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name, qidx); err = request_irq(pci_irq_vector(pf->pdev, vec), otx2_cq_intr_handler, 0, irq_name, &qset->napi[qidx]); if (err) { dev_err(pf->dev, "RVUPF%d: IRQ registration failed for CQ%d\n", rvu_get_pf(pf->pcifunc), qidx); goto err_free_cints; } vec++; otx2_config_irq_coalescing(pf, qidx); /* Enable CQ IRQ */ otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0)); otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0)); } otx2_set_cints_affinity(pf); if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) otx2_enable_rxvlan(pf, true); /* When reinitializing enable time stamping if it is enabled before */ if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) { pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED; otx2_config_hw_tx_tstamp(pf, true); } if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) { pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED; otx2_config_hw_rx_tstamp(pf, true); } pf->flags &= ~OTX2_FLAG_INTF_DOWN; /* 'intf_down' may be checked on any cpu */ smp_wmb(); /* Enable QoS configuration before starting tx queues 
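* (descriptive note: this re-programs the transmit schedulers for any
* HTB/QoS leaf queues created earlier, so offloaded classes keep
* working across an ifdown/ifup cycle)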
*/ otx2_qos_config_txschq(pf); /* we have already received link status notification */ if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK)) otx2_handle_link_event(pf); /* Install DMAC Filters */ if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) otx2_dmacflt_reinstall_flows(pf); err = otx2_rxtx_enable(pf, true); /* If a mbox communication error happens at this point then interface * will end up in a state such that it is in down state but hardware * mcam entries are enabled to receive the packets. Hence disable the * packet I/O. */ if (err == EIO) goto err_disable_rxtx; else if (err) goto err_tx_stop_queues; otx2_do_set_rx_mode(pf); return 0; err_disable_rxtx: otx2_rxtx_enable(pf, false); err_tx_stop_queues: netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); pf->flags |= OTX2_FLAG_INTF_DOWN; err_free_cints: otx2_free_cints(pf, qidx); vec = pci_irq_vector(pf->pdev, pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START); otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0)); free_irq(vec, pf); err_disable_napi: otx2_disable_napi(pf); otx2_free_hw_resources(pf); err_free_mem: kfree(qset->sq); kfree(qset->cq); kfree(qset->rq); kfree(qset->napi); return err; } EXPORT_SYMBOL(otx2_open); int otx2_stop(struct net_device *netdev) { struct otx2_nic *pf = netdev_priv(netdev); struct otx2_cq_poll *cq_poll = NULL; struct otx2_qset *qset = &pf->qset; struct otx2_rss_info *rss; int qidx, vec, wrk; /* If the DOWN flag is set resources are already freed */ if (pf->flags & OTX2_FLAG_INTF_DOWN) return 0; netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); pf->flags |= OTX2_FLAG_INTF_DOWN; /* 'intf_down' may be checked on any cpu */ smp_wmb(); /* First stop packet Rx/Tx */ otx2_rxtx_enable(pf, false); /* Clear RSS enable flag */ rss = &pf->hw.rss_info; rss->enable = false; /* Cleanup Queue IRQ */ vec = pci_irq_vector(pf->pdev, pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START); otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0)); free_irq(vec, pf); /* Cleanup CQ NAPI and IRQ */ vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START; for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { /* Disable interrupt */ otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0)); synchronize_irq(pci_irq_vector(pf->pdev, vec)); cq_poll = &qset->napi[qidx]; napi_synchronize(&cq_poll->napi); vec++; } netif_tx_disable(netdev); for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++) cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work); devm_kfree(pf->dev, pf->refill_wrk); otx2_free_hw_resources(pf); otx2_free_cints(pf, pf->hw.cint_cnt); otx2_disable_napi(pf); for (qidx = 0; qidx < netdev->num_tx_queues; qidx++) netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx)); kfree(qset->sq); kfree(qset->cq); kfree(qset->rq); kfree(qset->napi); /* Do not clear RQ/SQ ringsize settings */ memset_startat(qset, 0, sqe_cnt); return 0; } EXPORT_SYMBOL(otx2_stop); static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev) { struct otx2_nic *pf = netdev_priv(netdev); int qidx = skb_get_queue_mapping(skb); struct otx2_snd_queue *sq; struct netdev_queue *txq; int sq_idx; /* XDP SQs are not mapped with TXQs * advance qid to derive correct sq mapped with QOS */ sq_idx = (qidx >= pf->hw.tx_queues) ? 
(qidx + pf->hw.xdp_queues) : qidx; /* Check for minimum and maximum packet length */ if (skb->len <= ETH_HLEN || (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) { dev_kfree_skb(skb); return NETDEV_TX_OK; } sq = &pf->qset.sq[sq_idx]; txq = netdev_get_tx_queue(netdev, qidx); if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) { netif_tx_stop_queue(txq); /* Check again, incase SQBs got freed up */ smp_mb(); if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb) > sq->sqe_thresh) netif_tx_wake_queue(txq); return NETDEV_TX_BUSY; } return NETDEV_TX_OK; } static int otx2_qos_select_htb_queue(struct otx2_nic *pf, struct sk_buff *skb, u16 htb_maj_id) { u16 classid; if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id) classid = TC_H_MIN(skb->priority); else classid = READ_ONCE(pf->qos.defcls); if (!classid) return 0; return otx2_get_txq_by_classid(pf, classid); } u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb, struct net_device *sb_dev) { struct otx2_nic *pf = netdev_priv(netdev); bool qos_enabled; #ifdef CONFIG_DCB u8 vlan_prio; #endif int txq; qos_enabled = netdev->real_num_tx_queues > pf->hw.tx_queues; if (unlikely(qos_enabled)) { /* This smp_load_acquire() pairs with smp_store_release() in * otx2_qos_root_add() called from htb offload root creation */ u16 htb_maj_id = smp_load_acquire(&pf->qos.maj_id); if (unlikely(htb_maj_id)) { txq = otx2_qos_select_htb_queue(pf, skb, htb_maj_id); if (txq > 0) return txq; goto process_pfc; } } process_pfc: #ifdef CONFIG_DCB if (!skb_vlan_tag_present(skb)) goto pick_tx; vlan_prio = skb->vlan_tci >> 13; if ((vlan_prio > pf->hw.tx_queues - 1) || !pf->pfc_alloc_status[vlan_prio]) goto pick_tx; return vlan_prio; pick_tx: #endif txq = netdev_pick_tx(netdev, skb, NULL); if (unlikely(qos_enabled)) return txq % pf->hw.tx_queues; return txq; } EXPORT_SYMBOL(otx2_select_queue); static netdev_features_t otx2_fix_features(struct net_device *dev, netdev_features_t features) { if (features & NETIF_F_HW_VLAN_CTAG_RX) features |= NETIF_F_HW_VLAN_STAG_RX; else features &= ~NETIF_F_HW_VLAN_STAG_RX; return features; } static void otx2_set_rx_mode(struct net_device *netdev) { struct otx2_nic *pf = netdev_priv(netdev); queue_work(pf->otx2_wq, &pf->rx_mode_work); } static void otx2_rx_mode_wrk_handler(struct work_struct *work) { struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work); otx2_do_set_rx_mode(pf); } static int otx2_set_features(struct net_device *netdev, netdev_features_t features) { netdev_features_t changed = features ^ netdev->features; struct otx2_nic *pf = netdev_priv(netdev); if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev)) return otx2_cgx_config_loopback(pf, features & NETIF_F_LOOPBACK); if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev)) return otx2_enable_rxvlan(pf, features & NETIF_F_HW_VLAN_CTAG_RX); return otx2_handle_ntuple_tc_features(netdev, features); } static void otx2_reset_task(struct work_struct *work) { struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task); if (!netif_running(pf->netdev)) return; rtnl_lock(); otx2_stop(pf->netdev); pf->reset_count++; otx2_open(pf->netdev); netif_trans_update(pf->netdev); rtnl_unlock(); } static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable) { struct msg_req *req; int err; if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable) return 0; mutex_lock(&pfvf->mbox.lock); if (enable) req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox); else req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox); if (!req) { 
mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } err = otx2_sync_mbox_msg(&pfvf->mbox); if (err) { mutex_unlock(&pfvf->mbox.lock); return err; } mutex_unlock(&pfvf->mbox.lock); if (enable) pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED; else pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED; return 0; } static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable) { struct msg_req *req; int err; if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable) return 0; mutex_lock(&pfvf->mbox.lock); if (enable) req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox); else req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox); if (!req) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } err = otx2_sync_mbox_msg(&pfvf->mbox); if (err) { mutex_unlock(&pfvf->mbox.lock); return err; } mutex_unlock(&pfvf->mbox.lock); if (enable) pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED; else pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED; return 0; } int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) { struct otx2_nic *pfvf = netdev_priv(netdev); struct hwtstamp_config config; if (!pfvf->ptp) return -ENODEV; if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; switch (config.tx_type) { case HWTSTAMP_TX_OFF: if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC) pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC; cancel_delayed_work(&pfvf->ptp->synctstamp_work); otx2_config_hw_tx_tstamp(pfvf, false); break; case HWTSTAMP_TX_ONESTEP_SYNC: if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag)) return -ERANGE; pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC; schedule_delayed_work(&pfvf->ptp->synctstamp_work, msecs_to_jiffies(500)); fallthrough; case HWTSTAMP_TX_ON: otx2_config_hw_tx_tstamp(pfvf, true); break; default: return -ERANGE; } switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: otx2_config_hw_rx_tstamp(pfvf, false); break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: otx2_config_hw_rx_tstamp(pfvf, true); config.rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; } memcpy(&pfvf->tstamp, &config, sizeof(config)); return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; } EXPORT_SYMBOL(otx2_config_hwtstamp); int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) { struct otx2_nic *pfvf = netdev_priv(netdev); struct hwtstamp_config *cfg = &pfvf->tstamp; switch (cmd) { case SIOCSHWTSTAMP: return otx2_config_hwtstamp(netdev, req); case SIOCGHWTSTAMP: return copy_to_user(req->ifr_data, cfg, sizeof(*cfg)) ? 
-EFAULT : 0; default: return -EOPNOTSUPP; } } EXPORT_SYMBOL(otx2_ioctl); static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac) { struct npc_install_flow_req *req; int err; mutex_lock(&pf->mbox.lock); req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox); if (!req) { err = -ENOMEM; goto out; } ether_addr_copy(req->packet.dmac, mac); eth_broadcast_addr((u8 *)&req->mask.dmac); req->features = BIT_ULL(NPC_DMAC); req->channel = pf->hw.rx_chan_base; req->intf = NIX_INTF_RX; req->default_rule = 1; req->append = 1; req->vf = vf + 1; req->op = NIX_RX_ACTION_DEFAULT; err = otx2_sync_mbox_msg(&pf->mbox); out: mutex_unlock(&pf->mbox.lock); return err; } static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct otx2_nic *pf = netdev_priv(netdev); struct pci_dev *pdev = pf->pdev; struct otx2_vf_config *config; int ret; if (!netif_running(netdev)) return -EAGAIN; if (vf >= pf->total_vfs) return -EINVAL; if (!is_valid_ether_addr(mac)) return -EINVAL; config = &pf->vf_configs[vf]; ether_addr_copy(config->mac, mac); ret = otx2_do_set_vf_mac(pf, vf, mac); if (ret == 0) dev_info(&pdev->dev, "Load/Reload VF driver\n"); return ret; } static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos, __be16 proto) { struct otx2_flow_config *flow_cfg = pf->flow_cfg; struct nix_vtag_config_rsp *vtag_rsp; struct npc_delete_flow_req *del_req; struct nix_vtag_config *vtag_req; struct npc_install_flow_req *req; struct otx2_vf_config *config; int err = 0; u32 idx; config = &pf->vf_configs[vf]; if (!vlan && !config->vlan) goto out; mutex_lock(&pf->mbox.lock); /* free old tx vtag entry */ if (config->vlan) { vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox); if (!vtag_req) { err = -ENOMEM; goto out; } vtag_req->cfg_type = 0; vtag_req->tx.free_vtag0 = 1; vtag_req->tx.vtag0_idx = config->tx_vtag_idx; err = otx2_sync_mbox_msg(&pf->mbox); if (err) goto out; } if (!vlan && config->vlan) { /* rx */ del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox); if (!del_req) { err = -ENOMEM; goto out; } idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX); del_req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx]; err = otx2_sync_mbox_msg(&pf->mbox); if (err) goto out; /* tx */ del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox); if (!del_req) { err = -ENOMEM; goto out; } idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX); del_req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx]; err = otx2_sync_mbox_msg(&pf->mbox); goto out; } /* rx */ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox); if (!req) { err = -ENOMEM; goto out; } idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX); req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx]; req->packet.vlan_tci = htons(vlan); req->mask.vlan_tci = htons(VLAN_VID_MASK); /* af fills the destination mac addr */ eth_broadcast_addr((u8 *)&req->mask.dmac); req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC); req->channel = pf->hw.rx_chan_base; req->intf = NIX_INTF_RX; req->vf = vf + 1; req->op = NIX_RX_ACTION_DEFAULT; req->vtag0_valid = true; req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7; req->set_cntr = 1; err = otx2_sync_mbox_msg(&pf->mbox); if (err) goto out; /* tx */ vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox); if (!vtag_req) { err = -ENOMEM; goto out; } /* configure tx vtag params */ vtag_req->vtag_size = VTAGSIZE_T4; vtag_req->cfg_type = 0; /* tx vlan cfg */ vtag_req->tx.cfg_vtag0 = 1; vtag_req->tx.vtag0 = ((u64)ntohs(proto) << 16) | vlan; err = 
otx2_sync_mbox_msg(&pf->mbox); if (err) goto out; vtag_rsp = (struct nix_vtag_config_rsp *)otx2_mbox_get_rsp (&pf->mbox.mbox, 0, &vtag_req->hdr); if (IS_ERR(vtag_rsp)) { err = PTR_ERR(vtag_rsp); goto out; } config->tx_vtag_idx = vtag_rsp->vtag0_idx; req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox); if (!req) { err = -ENOMEM; goto out; } eth_zero_addr((u8 *)&req->mask.dmac); idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX); req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx]; req->features = BIT_ULL(NPC_DMAC); req->channel = pf->hw.tx_chan_base; req->intf = NIX_INTF_TX; req->vf = vf + 1; req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT; req->vtag0_def = vtag_rsp->vtag0_idx; req->vtag0_op = VTAG_INSERT; req->set_cntr = 1; err = otx2_sync_mbox_msg(&pf->mbox); out: config->vlan = vlan; mutex_unlock(&pf->mbox.lock); return err; } static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, __be16 proto) { struct otx2_nic *pf = netdev_priv(netdev); struct pci_dev *pdev = pf->pdev; if (!netif_running(netdev)) return -EAGAIN; if (vf >= pci_num_vf(pdev)) return -EINVAL; /* qos is currently unsupported */ if (vlan >= VLAN_N_VID || qos) return -EINVAL; if (proto != htons(ETH_P_8021Q)) return -EPROTONOSUPPORT; if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT)) return -EOPNOTSUPP; return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto); } static int otx2_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) { struct otx2_nic *pf = netdev_priv(netdev); struct pci_dev *pdev = pf->pdev; struct otx2_vf_config *config; if (!netif_running(netdev)) return -EAGAIN; if (vf >= pci_num_vf(pdev)) return -EINVAL; config = &pf->vf_configs[vf]; ivi->vf = vf; ether_addr_copy(ivi->mac, config->mac); ivi->vlan = config->vlan; ivi->trusted = config->trusted; return 0; } static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf, int qidx) { struct page *page; u64 dma_addr; int err = 0; dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data), offset_in_page(xdpf->data), xdpf->len, DMA_TO_DEVICE); if (dma_mapping_error(pf->dev, dma_addr)) return -ENOMEM; err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx); if (!err) { otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE); page = virt_to_page(xdpf->data); put_page(page); return -ENOMEM; } return 0; } static int otx2_xdp_xmit(struct net_device *netdev, int n, struct xdp_frame **frames, u32 flags) { struct otx2_nic *pf = netdev_priv(netdev); int qidx = smp_processor_id(); struct otx2_snd_queue *sq; int drops = 0, i; if (!netif_running(netdev)) return -ENETDOWN; qidx += pf->hw.tx_queues; sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL; /* Abort xmit if xdp queue is not */ if (unlikely(!sq)) return -ENXIO; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; int err; err = otx2_xdp_xmit_tx(pf, xdpf, qidx); if (err) drops++; } return n - drops; } static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog) { struct net_device *dev = pf->netdev; bool if_up = netif_running(pf->netdev); struct bpf_prog *old_prog; if (prog && dev->mtu > MAX_XDP_MTU) { netdev_warn(dev, "Jumbo frames not yet supported with XDP\n"); return -EOPNOTSUPP; } if (if_up) otx2_stop(pf->netdev); old_prog = xchg(&pf->xdp_prog, prog); if (old_prog) bpf_prog_put(old_prog); if (pf->xdp_prog) bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1); /* Network stack and XDP shared same rx queues. * Use separate tx queues for XDP and network stack. 
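* Descriptive note derived from the queue-selection code above: send
* queues are laid out as [0..tx_queues) for the stack, then
* [tx_queues..tx_queues+xdp_queues) for XDP, with QoS queues after
* that; otx2_xdp_xmit() accordingly picks SQ 'hw.tx_queues +
* smp_processor_id()'.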
*/ if (pf->xdp_prog) { pf->hw.xdp_queues = pf->hw.rx_queues; xdp_features_set_redirect_target(dev, false); } else { pf->hw.xdp_queues = 0; xdp_features_clear_redirect_target(dev); } pf->hw.non_qos_queues += pf->hw.xdp_queues; if (if_up) otx2_open(pf->netdev); return 0; } static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp) { struct otx2_nic *pf = netdev_priv(netdev); switch (xdp->command) { case XDP_SETUP_PROG: return otx2_xdp_setup(pf, xdp->prog); default: return -EINVAL; } } static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf, int req_perm) { struct set_vf_perm *req; int rc; mutex_lock(&pf->mbox.lock); req = otx2_mbox_alloc_msg_set_vf_perm(&pf->mbox); if (!req) { rc = -ENOMEM; goto out; } /* Let AF reset VF permissions as sriov is disabled */ if (req_perm == OTX2_RESET_VF_PERM) { req->flags |= RESET_VF_PERM; } else if (req_perm == OTX2_TRUSTED_VF) { if (pf->vf_configs[vf].trusted) req->flags |= VF_TRUSTED; } req->vf = vf; rc = otx2_sync_mbox_msg(&pf->mbox); out: mutex_unlock(&pf->mbox.lock); return rc; } static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf, bool enable) { struct otx2_nic *pf = netdev_priv(netdev); struct pci_dev *pdev = pf->pdev; int rc; if (vf >= pci_num_vf(pdev)) return -EINVAL; if (pf->vf_configs[vf].trusted == enable) return 0; pf->vf_configs[vf].trusted = enable; rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF); if (rc) pf->vf_configs[vf].trusted = !enable; else netdev_info(pf->netdev, "VF %d is %strusted\n", vf, enable ? "" : "not "); return rc; } static const struct net_device_ops otx2_netdev_ops = { .ndo_open = otx2_open, .ndo_stop = otx2_stop, .ndo_start_xmit = otx2_xmit, .ndo_select_queue = otx2_select_queue, .ndo_fix_features = otx2_fix_features, .ndo_set_mac_address = otx2_set_mac_address, .ndo_change_mtu = otx2_change_mtu, .ndo_set_rx_mode = otx2_set_rx_mode, .ndo_set_features = otx2_set_features, .ndo_tx_timeout = otx2_tx_timeout, .ndo_get_stats64 = otx2_get_stats64, .ndo_eth_ioctl = otx2_ioctl, .ndo_set_vf_mac = otx2_set_vf_mac, .ndo_set_vf_vlan = otx2_set_vf_vlan, .ndo_get_vf_config = otx2_get_vf_config, .ndo_bpf = otx2_xdp, .ndo_xdp_xmit = otx2_xdp_xmit, .ndo_setup_tc = otx2_setup_tc, .ndo_set_vf_trust = otx2_ndo_set_vf_trust, }; static int otx2_wq_init(struct otx2_nic *pf) { pf->otx2_wq = create_singlethread_workqueue("otx2_wq"); if (!pf->otx2_wq) return -ENOMEM; INIT_WORK(&pf->rx_mode_work, otx2_rx_mode_wrk_handler); INIT_WORK(&pf->reset_task, otx2_reset_task); return 0; } static int otx2_check_pf_usable(struct otx2_nic *nic) { u64 rev; rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM)); rev = (rev >> 12) & 0xFF; /* Check if AF has setup revision for RVUM block, * otherwise this driver probe should be deferred * until AF driver comes up. */ if (!rev) { dev_warn(nic->dev, "AF is not initialized, deferring probe\n"); return -EPROBE_DEFER; } return 0; } static int otx2_realloc_msix_vectors(struct otx2_nic *pf) { struct otx2_hw *hw = &pf->hw; int num_vec, err; /* NPA interrupts are inot registered, so alloc only * upto NIX vector offset. 
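* (descriptive note: the count below covers everything up through the
* NIX LF CINT vectors that otx2_open() will request; the AF mailbox
* interrupt is re-registered at the end of this function)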
*/ num_vec = hw->nix_msixoff; num_vec += NIX_LF_CINT_VEC_START + hw->max_queues; otx2_disable_mbox_intr(pf); pci_free_irq_vectors(hw->pdev); err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX); if (err < 0) { dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n", __func__, num_vec); return err; } return otx2_register_mbox_intr(pf, false); } static int otx2_sriov_vfcfg_init(struct otx2_nic *pf) { int i; pf->vf_configs = devm_kcalloc(pf->dev, pf->total_vfs, sizeof(struct otx2_vf_config), GFP_KERNEL); if (!pf->vf_configs) return -ENOMEM; for (i = 0; i < pf->total_vfs; i++) { pf->vf_configs[i].pf = pf; pf->vf_configs[i].intf_down = true; pf->vf_configs[i].trusted = false; INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work, otx2_vf_link_event_task); } return 0; } static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf) { int i; if (!pf->vf_configs) return; for (i = 0; i < pf->total_vfs; i++) { cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work); otx2_set_vf_permissions(pf, i, OTX2_RESET_VF_PERM); } } static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct device *dev = &pdev->dev; int err, qcount, qos_txqs; struct net_device *netdev; struct otx2_nic *pf; struct otx2_hw *hw; int num_vec; err = pcim_enable_device(pdev); if (err) { dev_err(dev, "Failed to enable PCI device\n"); return err; } err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_err(dev, "PCI request regions failed 0x%x\n", err); return err; } err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (err) { dev_err(dev, "DMA mask config failed, abort\n"); goto err_release_regions; } pci_set_master(pdev); /* Set number of queues */ qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT); qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES); netdev = alloc_etherdev_mqs(sizeof(*pf), qcount + qos_txqs, qcount); if (!netdev) { err = -ENOMEM; goto err_release_regions; } pci_set_drvdata(pdev, netdev); SET_NETDEV_DEV(netdev, &pdev->dev); pf = netdev_priv(netdev); pf->netdev = netdev; pf->pdev = pdev; pf->dev = dev; pf->total_vfs = pci_sriov_get_totalvfs(pdev); pf->flags |= OTX2_FLAG_INTF_DOWN; hw = &pf->hw; hw->pdev = pdev; hw->rx_queues = qcount; hw->tx_queues = qcount; hw->non_qos_queues = qcount; hw->max_queues = qcount; hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN; /* Use CQE of 128 byte descriptor size by default */ hw->xqe_size = 128; num_vec = pci_msix_vec_count(pdev); hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE, GFP_KERNEL); if (!hw->irq_name) { err = -ENOMEM; goto err_free_netdev; } hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec, sizeof(cpumask_var_t), GFP_KERNEL); if (!hw->affinity_mask) { err = -ENOMEM; goto err_free_netdev; } /* Map CSRs */ pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); if (!pf->reg_base) { dev_err(dev, "Unable to map physical function CSRs, aborting\n"); err = -ENOMEM; goto err_free_netdev; } err = otx2_check_pf_usable(pf); if (err) goto err_free_netdev; err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT, RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX); if (err < 0) { dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n", __func__, num_vec); goto err_free_netdev; } otx2_setup_dev_hw_settings(pf); /* Init PF <=> AF mailbox stuff */ err = otx2_pfaf_mbox_init(pf); if (err) goto err_free_irq_vectors; /* Register mailbox interrupt */ err = otx2_register_mbox_intr(pf, true); if (err) goto err_mbox_destroy; /* Request AF to attach NPA and NIX LFs to this PF. * NIX and NPA LFs are needed for this PF to function as a NIC. 
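* (descriptive note: the NPA LF supplies the buffer pools/auras used
* for Rx buffers and SQB pointers, while the NIX LF provides the
* RQ/SQ/CQ queues themselves; both are configured later in
* otx2_init_hw_resources())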
*/ err = otx2_attach_npa_nix(pf); if (err) goto err_disable_mbox_intr; err = otx2_realloc_msix_vectors(pf); if (err) goto err_detach_rsrc; err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues); if (err) goto err_detach_rsrc; err = cn10k_lmtst_init(pf); if (err) goto err_detach_rsrc; /* Assign default mac address */ otx2_get_mac_from_af(netdev); /* Don't check for error. Proceed without ptp */ otx2_ptp_init(pf); /* NPA's pool is a stack to which SW frees buffer pointers via Aura. * HW allocates buffer pointer from stack and uses it for DMA'ing * ingress packet. In some scenarios HW can free back allocated buffer * pointers to pool. This makes it impossible for SW to maintain a * parallel list where physical addresses of buffer pointers (IOVAs) * given to HW can be saved for later reference. * * So the only way to convert Rx packet's buffer address is to use * IOMMU's iova_to_phys() handler which translates the address by * walking through the translation tables. */ pf->iommu_domain = iommu_get_domain_for_dev(dev); netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4); netdev->features |= netdev->hw_features; err = otx2_mcam_flow_init(pf); if (err) goto err_ptp_destroy; err = cn10k_mcs_init(pf); if (err) goto err_del_mcam_entries; if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT) netdev->hw_features |= NETIF_F_NTUPLE; if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT) netdev->priv_flags |= IFF_UNICAST_FLT; /* Support TSO on tag interface */ netdev->vlan_features |= netdev->features; netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; netdev->features |= netdev->hw_features; /* HW supports tc offload but mutually exclusive with n-tuple filters */ if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT) netdev->hw_features |= NETIF_F_HW_TC; netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL; netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS); netdev->watchdog_timeo = OTX2_TX_TIMEOUT; netdev->netdev_ops = &otx2_netdev_ops; netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT; netdev->min_mtu = OTX2_MIN_MTU; netdev->max_mtu = otx2_get_max_mtu(pf); err = register_netdev(netdev); if (err) { dev_err(dev, "Failed to register netdevice\n"); goto err_mcs_free; } err = otx2_wq_init(pf); if (err) goto err_unreg_netdev; otx2_set_ethtool_ops(netdev); err = otx2_init_tc(pf); if (err) goto err_mcam_flow_del; err = otx2_register_dl(pf); if (err) goto err_mcam_flow_del; /* Initialize SR-IOV resources */ err = otx2_sriov_vfcfg_init(pf); if (err) goto err_pf_sriov_init; /* Enable link notifications */ otx2_cgx_config_linkevents(pf, true); #ifdef CONFIG_DCB err = otx2_dcbnl_set_ops(netdev); if (err) goto err_pf_sriov_init; #endif otx2_qos_init(pf, qos_txqs); return 0; err_pf_sriov_init: otx2_shutdown_tc(pf); err_mcam_flow_del: otx2_mcam_flow_del(pf); err_unreg_netdev: unregister_netdev(netdev); err_mcs_free: cn10k_mcs_free(pf); err_del_mcam_entries: otx2_mcam_flow_del(pf); err_ptp_destroy: otx2_ptp_destroy(pf); err_detach_rsrc: if (pf->hw.lmt_info) free_percpu(pf->hw.lmt_info); if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) qmem_free(pf->dev, pf->dync_lmt); otx2_detach_resources(&pf->mbox); err_disable_mbox_intr: otx2_disable_mbox_intr(pf); err_mbox_destroy: otx2_pfaf_mbox_destroy(pf); err_free_irq_vectors: pci_free_irq_vectors(hw->pdev); err_free_netdev: 
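/* Probe error unwind: from here teardown mirrors the setup above in
 * reverse - clear drvdata and free the netdev before the PCI regions
 * are released below.
 */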
pci_set_drvdata(pdev, NULL); free_netdev(netdev); err_release_regions: pci_release_regions(pdev); return err; } static void otx2_vf_link_event_task(struct work_struct *work) { struct otx2_vf_config *config; struct cgx_link_info_msg *req; struct mbox_msghdr *msghdr; struct otx2_nic *pf; int vf_idx; config = container_of(work, struct otx2_vf_config, link_event_work.work); vf_idx = config - config->pf->vf_configs; pf = config->pf; msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx, sizeof(*req), sizeof(struct msg_rsp)); if (!msghdr) { dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx); return; } req = (struct cgx_link_info_msg *)msghdr; req->hdr.id = MBOX_MSG_CGX_LINK_EVENT; req->hdr.sig = OTX2_MBOX_REQ_SIG; memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info)); otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx); } static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs) { struct net_device *netdev = pci_get_drvdata(pdev); struct otx2_nic *pf = netdev_priv(netdev); int ret; /* Init PF <=> VF mailbox stuff */ ret = otx2_pfvf_mbox_init(pf, numvfs); if (ret) return ret; ret = otx2_register_pfvf_mbox_intr(pf, numvfs); if (ret) goto free_mbox; ret = otx2_pf_flr_init(pf, numvfs); if (ret) goto free_intr; ret = otx2_register_flr_me_intr(pf, numvfs); if (ret) goto free_flr; ret = pci_enable_sriov(pdev, numvfs); if (ret) goto free_flr_intr; return numvfs; free_flr_intr: otx2_disable_flr_me_intr(pf); free_flr: otx2_flr_wq_destroy(pf); free_intr: otx2_disable_pfvf_mbox_intr(pf, numvfs); free_mbox: otx2_pfvf_mbox_destroy(pf); return ret; } static int otx2_sriov_disable(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct otx2_nic *pf = netdev_priv(netdev); int numvfs = pci_num_vf(pdev); if (!numvfs) return 0; pci_disable_sriov(pdev); otx2_disable_flr_me_intr(pf); otx2_flr_wq_destroy(pf); otx2_disable_pfvf_mbox_intr(pf, numvfs); otx2_pfvf_mbox_destroy(pf); return 0; } static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs) { if (numvfs == 0) return otx2_sriov_disable(pdev); else return otx2_sriov_enable(pdev, numvfs); } static void otx2_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct otx2_nic *pf; if (!netdev) return; pf = netdev_priv(netdev); pf->flags |= OTX2_FLAG_PF_SHUTDOWN; if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) otx2_config_hw_tx_tstamp(pf, false); if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) otx2_config_hw_rx_tstamp(pf, false); /* Disable 802.3x pause frames */ if (pf->flags & OTX2_FLAG_RX_PAUSE_ENABLED || (pf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) { pf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED; pf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED; otx2_config_pause_frm(pf); } #ifdef CONFIG_DCB /* Disable PFC config */ if (pf->pfc_en) { pf->pfc_en = 0; otx2_config_priority_flow_ctrl(pf); } #endif cancel_work_sync(&pf->reset_task); /* Disable link notifications */ otx2_cgx_config_linkevents(pf, false); otx2_unregister_dl(pf); unregister_netdev(netdev); cn10k_mcs_free(pf); otx2_sriov_disable(pf->pdev); otx2_sriov_vfcfg_cleanup(pf); if (pf->otx2_wq) destroy_workqueue(pf->otx2_wq); otx2_ptp_destroy(pf); otx2_mcam_flow_del(pf); otx2_shutdown_tc(pf); otx2_shutdown_qos(pf); otx2_detach_resources(&pf->mbox); if (pf->hw.lmt_info) free_percpu(pf->hw.lmt_info); if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) qmem_free(pf->dev, pf->dync_lmt); otx2_disable_mbox_intr(pf); otx2_pfaf_mbox_destroy(pf); pci_free_irq_vectors(pf->pdev); pci_set_drvdata(pdev, NULL); free_netdev(netdev); pci_release_regions(pdev); } 
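/* Driver registration below: .shutdown reuses otx2_remove(), so
 * hardware timestamping, pause frames and PFC are also torn down on
 * reboot/kexec, not only on driver unbind.
 */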
static struct pci_driver otx2_pf_driver = { .name = DRV_NAME, .id_table = otx2_pf_id_table, .probe = otx2_probe, .shutdown = otx2_remove, .remove = otx2_remove, .sriov_configure = otx2_sriov_configure }; static int __init otx2_rvupf_init_module(void) { pr_info("%s: %s\n", DRV_NAME, DRV_STRING); return pci_register_driver(&otx2_pf_driver); } static void __exit otx2_rvupf_cleanup_module(void) { pci_unregister_driver(&otx2_pf_driver); } module_init(otx2_rvupf_init_module); module_exit(otx2_rvupf_cleanup_module);
linux-master
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Ethernet driver * * Copyright (C) 2020 Marvell. * */ #include <linux/etherdevice.h> #include <net/ip.h> #include <net/tso.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <net/ip6_checksum.h> #include "otx2_reg.h" #include "otx2_common.h" #include "otx2_struct.h" #include "otx2_txrx.h" #include "otx2_ptp.h" #include "cn10k.h" #define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx))) #define PTP_PORT 0x13F /* PTPv2 header Original Timestamp starts at byte offset 34 and * contains 6 byte seconds field and 4 byte nano seconds field. */ #define PTP_SYNC_SEC_OFFSET 34 static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, struct bpf_prog *prog, struct nix_cqe_rx_s *cqe, struct otx2_cq_queue *cq, bool *need_xdp_flush); static int otx2_nix_cq_op_status(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) { u64 incr = (u64)(cq->cq_idx) << 32; u64 status; status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr); if (unlikely(status & BIT_ULL(CQ_OP_STAT_OP_ERR) || status & BIT_ULL(CQ_OP_STAT_CQ_ERR))) { dev_err(pfvf->dev, "CQ stopped due to error"); return -EINVAL; } cq->cq_tail = status & 0xFFFFF; cq->cq_head = (status >> 20) & 0xFFFFF; if (cq->cq_tail < cq->cq_head) cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) + cq->cq_tail; else cq->pend_cqe = cq->cq_tail - cq->cq_head; return 0; } static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq) { struct nix_cqe_hdr_s *cqe_hdr; cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head); if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID) return NULL; cq->cq_head++; cq->cq_head &= (cq->cqe_cnt - 1); return cqe_hdr; } static unsigned int frag_num(unsigned int i) { #ifdef __BIG_ENDIAN return (i & ~3) + 3 - (i & 3); #else return i; #endif } static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, struct sk_buff *skb, int seg, int *len) { const skb_frag_t *frag; struct page *page; int offset; /* First segment is always skb->data */ if (!seg) { page = virt_to_page(skb->data); offset = offset_in_page(skb->data); *len = skb_headlen(skb); } else { frag = &skb_shinfo(skb)->frags[seg - 1]; page = skb_frag_page(frag); offset = skb_frag_off(frag); *len = skb_frag_size(frag); } return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE); } static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg) { int seg; for (seg = 0; seg < sg->num_segs; seg++) { otx2_dma_unmap_page(pfvf, sg->dma_addr[seg], sg->size[seg], DMA_TO_DEVICE); } sg->num_segs = 0; } static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, struct nix_cqe_tx_s *cqe) { struct nix_send_comp_s *snd_comp = &cqe->comp; struct sg_list *sg; struct page *page; u64 pa; sg = &sq->sg[snd_comp->sqe_id]; pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]); otx2_dma_unmap_page(pfvf, sg->dma_addr[0], sg->size[0], DMA_TO_DEVICE); page = virt_to_page(phys_to_virt(pa)); put_page(page); } static void otx2_snd_pkt_handler(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, struct otx2_snd_queue *sq, struct nix_cqe_tx_s *cqe, int budget, int *tx_pkts, int *tx_bytes) { struct nix_send_comp_s *snd_comp = &cqe->comp; struct skb_shared_hwtstamps ts; struct sk_buff *skb = NULL; u64 timestamp, tsns; struct sg_list *sg; int err; if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf)) net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n", pfvf->netdev->name, cq->cint_idx, snd_comp->status); sg = &sq->sg[snd_comp->sqe_id]; skb = (struct sk_buff *)sg->skb; if 
(unlikely(!skb)) return; if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) { timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id]; if (timestamp != 1) { timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp); err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns); if (!err) { memset(&ts, 0, sizeof(ts)); ts.hwtstamp = ns_to_ktime(tsns); skb_tstamp_tx(skb, &ts); } } } *tx_bytes += skb->len; (*tx_pkts)++; otx2_dma_unmap_skb_frags(pfvf, sg); napi_consume_skb(skb, budget); sg->skb = (u64)NULL; } static void otx2_set_rxtstamp(struct otx2_nic *pfvf, struct sk_buff *skb, void *data) { u64 timestamp, tsns; int err; if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)) return; timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data); /* The first 8 bytes is the timestamp */ err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns); if (err) return; skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns); } static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb, u64 iova, int len, struct nix_rx_parse_s *parse, int qidx) { struct page *page; int off = 0; void *va; va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova)); if (likely(!skb_shinfo(skb)->nr_frags)) { /* Check if data starts at some nonzero offset * from the start of the buffer. For now the * only possible offset is 8 bytes in the case * where packet is prepended by a timestamp. */ if (parse->laptr) { otx2_set_rxtstamp(pfvf, skb, va); off = OTX2_HW_TIMESTAMP_LEN; } } page = virt_to_page(va); if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) { skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, va - page_address(page) + off, len - off, pfvf->rbsize); return true; } /* If more than MAX_SKB_FRAGS fragments are received then * give back those buffer pointers to hardware for reuse. */ pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL); return false; } static void otx2_set_rxhash(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe, struct sk_buff *skb) { enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE; struct otx2_rss_info *rss; u32 hash = 0; if (!(pfvf->netdev->features & NETIF_F_RXHASH)) return; rss = &pfvf->hw.rss_info; if (rss->flowkey_cfg) { if (rss->flowkey_cfg & ~(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6)) hash_type = PKT_HASH_TYPE_L4; else hash_type = PKT_HASH_TYPE_L3; hash = cqe->hdr.flow_tag; } skb_set_hash(skb, hash, hash_type); } static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe, int qidx) { struct nix_rx_sg_s *sg = &cqe->sg; void *end, *start; u64 *seg_addr; int seg; start = (void *)sg; end = start + ((cqe->parse.desc_sizem1 + 1) * 16); while (start < end) { sg = (struct nix_rx_sg_s *)start; seg_addr = &sg->seg_addr; for (seg = 0; seg < sg->segs; seg++, seg_addr++) pfvf->hw_ops->aura_freeptr(pfvf, qidx, *seg_addr & ~0x07ULL); start += sizeof(*sg); } } static bool otx2_check_rcv_errors(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe, int qidx) { struct otx2_drv_stats *stats = &pfvf->hw.drv_stats; struct nix_rx_parse_s *parse = &cqe->parse; if (netif_msg_rx_err(pfvf)) netdev_err(pfvf->netdev, "RQ%d: Error pkt with errlev:0x%x errcode:0x%x\n", qidx, parse->errlev, parse->errcode); if (parse->errlev == NPC_ERRLVL_RE) { switch (parse->errcode) { case ERRCODE_FCS: case ERRCODE_FCS_RCV: atomic_inc(&stats->rx_fcs_errs); break; case ERRCODE_UNDERSIZE: atomic_inc(&stats->rx_undersize_errs); break; case ERRCODE_OVERSIZE: atomic_inc(&stats->rx_oversize_errs); break; case ERRCODE_OL2_LEN_MISMATCH: atomic_inc(&stats->rx_len_errs); break; default: atomic_inc(&stats->rx_other_errs); break; } } 
else if (parse->errlev == NPC_ERRLVL_NIX) { switch (parse->errcode) { case ERRCODE_OL3_LEN: case ERRCODE_OL4_LEN: case ERRCODE_IL3_LEN: case ERRCODE_IL4_LEN: atomic_inc(&stats->rx_len_errs); break; case ERRCODE_OL4_CSUM: case ERRCODE_IL4_CSUM: atomic_inc(&stats->rx_csum_errs); break; default: atomic_inc(&stats->rx_other_errs); break; } } else { atomic_inc(&stats->rx_other_errs); /* For now ignore all the NPC parser errors and * pass the packets to stack. */ return false; } /* If RXALL is enabled pass on packets to stack. */ if (pfvf->netdev->features & NETIF_F_RXALL) return false; /* Free buffer back to pool */ if (cqe->sg.segs) otx2_free_rcv_seg(pfvf, cqe, qidx); return true; } static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, struct napi_struct *napi, struct otx2_cq_queue *cq, struct nix_cqe_rx_s *cqe, bool *need_xdp_flush) { struct nix_rx_parse_s *parse = &cqe->parse; struct nix_rx_sg_s *sg = &cqe->sg; struct sk_buff *skb = NULL; void *end, *start; u64 *seg_addr; u16 *seg_size; int seg; if (unlikely(parse->errlev || parse->errcode)) { if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) return; } if (pfvf->xdp_prog) if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush)) return; skb = napi_get_frags(napi); if (unlikely(!skb)) return; start = (void *)sg; end = start + ((cqe->parse.desc_sizem1 + 1) * 16); while (start < end) { sg = (struct nix_rx_sg_s *)start; seg_addr = &sg->seg_addr; seg_size = (void *)sg; for (seg = 0; seg < sg->segs; seg++, seg_addr++) { if (otx2_skb_add_frag(pfvf, skb, *seg_addr, seg_size[seg], parse, cq->cq_idx)) cq->pool_ptrs++; } start += sizeof(*sg); } otx2_set_rxhash(pfvf, cqe, skb); skb_record_rx_queue(skb, cq->cq_idx); if (pfvf->netdev->features & NETIF_F_RXCSUM) skb->ip_summed = CHECKSUM_UNNECESSARY; skb_mark_for_recycle(skb); napi_gro_frags(napi); } static int otx2_rx_napi_handler(struct otx2_nic *pfvf, struct napi_struct *napi, struct otx2_cq_queue *cq, int budget) { bool need_xdp_flush = false; struct nix_cqe_rx_s *cqe; int processed_cqe = 0; if (cq->pend_cqe >= budget) goto process_cqe; if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) return 0; process_cqe: while (likely(processed_cqe < budget) && cq->pend_cqe) { cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head); if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID || !cqe->sg.seg_addr) { if (!processed_cqe) return 0; break; } cq->cq_head++; cq->cq_head &= (cq->cqe_cnt - 1); otx2_rcv_pkt_handler(pfvf, napi, cq, cqe, &need_xdp_flush); cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID; cqe->sg.seg_addr = 0x00; processed_cqe++; cq->pend_cqe--; } if (need_xdp_flush) xdp_do_flush(); /* Free CQEs to HW */ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, ((u64)cq->cq_idx << 32) | processed_cqe); return processed_cqe; } int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) { struct otx2_nic *pfvf = dev; int cnt = cq->pool_ptrs; dma_addr_t bufptr; while (cq->pool_ptrs) { if (otx2_alloc_buffer(pfvf, cq, &bufptr)) break; otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM); cq->pool_ptrs--; } return cnt - cq->pool_ptrs; } static int otx2_tx_napi_handler(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int budget) { int tx_pkts = 0, tx_bytes = 0, qidx; struct otx2_snd_queue *sq; struct nix_cqe_tx_s *cqe; int processed_cqe = 0; if (cq->pend_cqe >= budget) goto process_cqe; if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) return 0; process_cqe: qidx = cq->cq_idx - pfvf->hw.rx_queues; sq = &pfvf->qset.sq[qidx]; while (likely(processed_cqe < budget) && cq->pend_cqe) { cqe = (struct 
nix_cqe_tx_s *)otx2_get_next_cqe(cq); if (unlikely(!cqe)) { if (!processed_cqe) return 0; break; } qidx = cq->cq_idx - pfvf->hw.rx_queues; if (cq->cq_type == CQ_XDP) otx2_xdp_snd_pkt_handler(pfvf, sq, cqe); else otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx], cqe, budget, &tx_pkts, &tx_bytes); cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID; processed_cqe++; cq->pend_cqe--; sq->cons_head++; sq->cons_head &= (sq->sqe_cnt - 1); } /* Free CQEs to HW */ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, ((u64)cq->cq_idx << 32) | processed_cqe); if (likely(tx_pkts)) { struct netdev_queue *txq; qidx = cq->cq_idx - pfvf->hw.rx_queues; if (qidx >= pfvf->hw.tx_queues) qidx -= pfvf->hw.xdp_queues; txq = netdev_get_tx_queue(pfvf->netdev, qidx); netdev_tx_completed_queue(txq, tx_pkts, tx_bytes); /* Check if queue was stopped earlier due to ring full */ smp_mb(); if (netif_tx_queue_stopped(txq) && netif_carrier_ok(pfvf->netdev)) netif_tx_wake_queue(txq); } return 0; } static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll) { struct dim_sample dim_sample; u64 rx_frames, rx_bytes; rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) + OTX2_GET_RX_STATS(RX_UCAST); rx_bytes = OTX2_GET_RX_STATS(RX_OCTS); dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample); net_dim(&cq_poll->dim, dim_sample); } int otx2_napi_handler(struct napi_struct *napi, int budget) { struct otx2_cq_queue *rx_cq = NULL; struct otx2_cq_poll *cq_poll; int workdone = 0, cq_idx, i; struct otx2_cq_queue *cq; struct otx2_qset *qset; struct otx2_nic *pfvf; int filled_cnt = -1; cq_poll = container_of(napi, struct otx2_cq_poll, napi); pfvf = (struct otx2_nic *)cq_poll->dev; qset = &pfvf->qset; for (i = 0; i < CQS_PER_CINT; i++) { cq_idx = cq_poll->cq_ids[i]; if (unlikely(cq_idx == CINT_INVALID_CQ)) continue; cq = &qset->cq[cq_idx]; if (cq->cq_type == CQ_RX) { rx_cq = cq; workdone += otx2_rx_napi_handler(pfvf, napi, cq, budget); } else { workdone += otx2_tx_napi_handler(pfvf, cq, budget); } } if (rx_cq && rx_cq->pool_ptrs) filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq); /* Clear the IRQ */ otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0)); if (workdone < budget && napi_complete_done(napi, workdone)) { /* If interface is going down, don't re-enable IRQ */ if (pfvf->flags & OTX2_FLAG_INTF_DOWN) return workdone; /* Check for adaptive interrupt coalesce */ if (workdone != 0 && ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) == OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) { /* Adjust irq coalese using net_dim */ otx2_adjust_adaptive_coalese(pfvf, cq_poll); /* Update irq coalescing */ for (i = 0; i < pfvf->hw.cint_cnt; i++) otx2_config_irq_coalescing(pfvf, i); } if (unlikely(!filled_cnt)) { struct refill_work *work; struct delayed_work *dwork; work = &pfvf->refill_wrk[cq->cq_idx]; dwork = &work->pool_refill_work; /* Schedule a task if no other task is running */ if (!cq->refill_task_sched) { work->napi = napi; cq->refill_task_sched = true; schedule_delayed_work(dwork, msecs_to_jiffies(100)); } } else { /* Re-enable interrupts */ otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), BIT_ULL(0)); } } return workdone; } void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx) { u64 status; /* Packet data stores should finish before SQE is flushed to HW */ dma_wmb(); do { memcpy(sq->lmt_addr, sq->sqe_base, size); status = otx2_lmt_flush(sq->io_addr); } while (status == 0); sq->head++; sq->head &= (sq->sqe_cnt - 1); } #define MAX_SEGS_PER_SG 3 /* Add SQE 
scatter/gather subdescriptor structure */ static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, struct sk_buff *skb, int num_segs, int *offset) { struct nix_sqe_sg_s *sg = NULL; u64 dma_addr, *iova = NULL; u16 *sg_lens = NULL; int seg, len; sq->sg[sq->head].num_segs = 0; for (seg = 0; seg < num_segs; seg++) { if ((seg % MAX_SEGS_PER_SG) == 0) { sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset); sg->ld_type = NIX_SEND_LDTYPE_LDD; sg->subdc = NIX_SUBDC_SG; sg->segs = 0; sg_lens = (void *)sg; iova = (void *)sg + sizeof(*sg); /* Next subdc always starts at a 16byte boundary. * So if sg->segs is whether 2 or 3, offset += 16bytes. */ if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1)) *offset += sizeof(*sg) + (3 * sizeof(u64)); else *offset += sizeof(*sg) + sizeof(u64); } dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len); if (dma_mapping_error(pfvf->dev, dma_addr)) return false; sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = len; sg->segs++; *iova++ = dma_addr; /* Save DMA mapping info for later unmapping */ sq->sg[sq->head].dma_addr[seg] = dma_addr; sq->sg[sq->head].size[seg] = len; sq->sg[sq->head].num_segs++; } sq->sg[sq->head].skb = (u64)skb; return true; } /* Add SQE extended header subdescriptor */ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, struct sk_buff *skb, int *offset) { struct nix_sqe_ext_s *ext; ext = (struct nix_sqe_ext_s *)(sq->sqe_base + *offset); ext->subdc = NIX_SUBDC_EXT; if (skb_shinfo(skb)->gso_size) { ext->lso = 1; ext->lso_sb = skb_tcp_all_headers(skb); ext->lso_mps = skb_shinfo(skb)->gso_size; /* Only TSOv4 and TSOv6 GSO offloads are supported */ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { ext->lso_format = pfvf->hw.lso_tsov4_idx; /* HW adds payload size to 'ip_hdr->tot_len' while * sending TSO segment, hence set payload length * in IP header of the packet to just header length. */ ip_hdr(skb)->tot_len = htons(ext->lso_sb - skb_network_offset(skb)); } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { ext->lso_format = pfvf->hw.lso_tsov6_idx; ipv6_hdr(skb)->payload_len = htons(tcp_hdrlen(skb)); } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { __be16 l3_proto = vlan_get_protocol(skb); struct udphdr *udph = udp_hdr(skb); u16 iplen; ext->lso_sb = skb_transport_offset(skb) + sizeof(struct udphdr); /* HW adds payload size to length fields in IP and * UDP headers while segmentation, hence adjust the * lengths to just header sizes. 
*/ iplen = htons(ext->lso_sb - skb_network_offset(skb)); if (l3_proto == htons(ETH_P_IP)) { ip_hdr(skb)->tot_len = iplen; ext->lso_format = pfvf->hw.lso_udpv4_idx; } else { ipv6_hdr(skb)->payload_len = iplen; ext->lso_format = pfvf->hw.lso_udpv6_idx; } udph->len = htons(sizeof(struct udphdr)); } } else if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { ext->tstmp = 1; } #define OTX2_VLAN_PTR_OFFSET (ETH_HLEN - ETH_TLEN) if (skb_vlan_tag_present(skb)) { if (skb->vlan_proto == htons(ETH_P_8021Q)) { ext->vlan1_ins_ena = 1; ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET; ext->vlan1_ins_tci = skb_vlan_tag_get(skb); } else if (skb->vlan_proto == htons(ETH_P_8021AD)) { ext->vlan0_ins_ena = 1; ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET; ext->vlan0_ins_tci = skb_vlan_tag_get(skb); } } *offset += sizeof(*ext); } static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset, int alg, u64 iova, int ptp_offset, u64 base_ns, bool udp_csum_crt) { struct nix_sqe_mem_s *mem; mem = (struct nix_sqe_mem_s *)(sq->sqe_base + *offset); mem->subdc = NIX_SUBDC_MEM; mem->alg = alg; mem->wmem = 1; /* wait for the memory operation */ mem->addr = iova; if (ptp_offset) { mem->start_offset = ptp_offset; mem->udp_csum_crt = !!udp_csum_crt; mem->base_ns = base_ns; mem->step_type = 1; } *offset += sizeof(*mem); } /* Add SQE header subdescriptor structure */ static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, struct nix_sqe_hdr_s *sqe_hdr, struct sk_buff *skb, u16 qidx) { int proto = 0; /* Check if SQE was framed before, if yes then no need to * set these constants again and again. */ if (!sqe_hdr->total) { /* Don't free Tx buffers to Aura */ sqe_hdr->df = 1; sqe_hdr->aura = sq->aura_id; /* Post a CQE Tx after pkt transmission */ sqe_hdr->pnc = 1; sqe_hdr->sq = (qidx >= pfvf->hw.tx_queues) ? qidx + pfvf->hw.xdp_queues : qidx; } sqe_hdr->total = skb->len; /* Set SQE identifier which will be used later for freeing SKB */ sqe_hdr->sqe_id = sq->head; /* Offload TCP/UDP checksum to HW */ if (skb->ip_summed == CHECKSUM_PARTIAL) { sqe_hdr->ol3ptr = skb_network_offset(skb); sqe_hdr->ol4ptr = skb_transport_offset(skb); /* get vlan protocol Ethertype */ if (eth_type_vlan(skb->protocol)) skb->protocol = vlan_get_protocol(skb); if (skb->protocol == htons(ETH_P_IP)) { proto = ip_hdr(skb)->protocol; /* In case of TSO, HW needs this to be explicitly set. * So set this always, instead of adding a check. 
*/ sqe_hdr->ol3type = NIX_SENDL3TYPE_IP4_CKSUM; } else if (skb->protocol == htons(ETH_P_IPV6)) { proto = ipv6_hdr(skb)->nexthdr; sqe_hdr->ol3type = NIX_SENDL3TYPE_IP6; } if (proto == IPPROTO_TCP) sqe_hdr->ol4type = NIX_SENDL4TYPE_TCP_CKSUM; else if (proto == IPPROTO_UDP) sqe_hdr->ol4type = NIX_SENDL4TYPE_UDP_CKSUM; } } static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, struct sk_buff *skb, int sqe, int hdr_len) { int num_segs = skb_shinfo(skb)->nr_frags + 1; struct sg_list *sg = &sq->sg[sqe]; u64 dma_addr; int seg, len; sg->num_segs = 0; /* Get payload length at skb->data */ len = skb_headlen(skb) - hdr_len; for (seg = 0; seg < num_segs; seg++) { /* Skip skb->data, if there is no payload */ if (!seg && !len) continue; dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len); if (dma_mapping_error(pfvf->dev, dma_addr)) goto unmap; /* Save DMA mapping info for later unmapping */ sg->dma_addr[sg->num_segs] = dma_addr; sg->size[sg->num_segs] = len; sg->num_segs++; } return 0; unmap: otx2_dma_unmap_skb_frags(pfvf, sg); return -EINVAL; } static u64 otx2_tso_frag_dma_addr(struct otx2_snd_queue *sq, struct sk_buff *skb, int seg, u64 seg_addr, int hdr_len, int sqe) { struct sg_list *sg = &sq->sg[sqe]; const skb_frag_t *frag; int offset; if (seg < 0) return sg->dma_addr[0] + (seg_addr - (u64)skb->data); frag = &skb_shinfo(skb)->frags[seg]; offset = seg_addr - (u64)skb_frag_address(frag); if (skb_headlen(skb) - hdr_len) seg++; return sg->dma_addr[seg] + offset; } static void otx2_sqe_tso_add_sg(struct otx2_snd_queue *sq, struct sg_list *list, int *offset) { struct nix_sqe_sg_s *sg = NULL; u16 *sg_lens = NULL; u64 *iova = NULL; int seg; /* Add SG descriptors with buffer addresses */ for (seg = 0; seg < list->num_segs; seg++) { if ((seg % MAX_SEGS_PER_SG) == 0) { sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset); sg->ld_type = NIX_SEND_LDTYPE_LDD; sg->subdc = NIX_SUBDC_SG; sg->segs = 0; sg_lens = (void *)sg; iova = (void *)sg + sizeof(*sg); /* Next subdc always starts at a 16byte boundary. * So if sg->segs is whether 2 or 3, offset += 16bytes. */ if ((list->num_segs - seg) >= (MAX_SEGS_PER_SG - 1)) *offset += sizeof(*sg) + (3 * sizeof(u64)); else *offset += sizeof(*sg) + sizeof(u64); } sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = list->size[seg]; *iova++ = list->dma_addr[seg]; sg->segs++; } } static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, struct sk_buff *skb, u16 qidx) { struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx); int hdr_len, tcp_data, seg_len, pkt_len, offset; struct nix_sqe_hdr_s *sqe_hdr; int first_sqe = sq->head; struct sg_list list; struct tso_t tso; hdr_len = tso_start(skb, &tso); /* Map SKB's fragments to DMA. * It's done here to avoid mapping for every TSO segment's packet. 
*/ if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) { dev_kfree_skb_any(skb); return; } netdev_tx_sent_queue(txq, skb->len); tcp_data = skb->len - hdr_len; while (tcp_data > 0) { char *hdr; seg_len = min_t(int, skb_shinfo(skb)->gso_size, tcp_data); tcp_data -= seg_len; /* Set SQE's SEND_HDR */ memset(sq->sqe_base, 0, sq->sqe_size); sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base); otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx); offset = sizeof(*sqe_hdr); /* Add TSO segment's pkt header */ hdr = sq->tso_hdrs->base + (sq->head * TSO_HEADER_SIZE); tso_build_hdr(skb, hdr, &tso, seg_len, tcp_data == 0); list.dma_addr[0] = sq->tso_hdrs->iova + (sq->head * TSO_HEADER_SIZE); list.size[0] = hdr_len; list.num_segs = 1; /* Add TSO segment's payload data fragments */ pkt_len = hdr_len; while (seg_len > 0) { int size; size = min_t(int, tso.size, seg_len); list.size[list.num_segs] = size; list.dma_addr[list.num_segs] = otx2_tso_frag_dma_addr(sq, skb, tso.next_frag_idx - 1, (u64)tso.data, hdr_len, first_sqe); list.num_segs++; pkt_len += size; seg_len -= size; tso_build_data(skb, &tso, size); } sqe_hdr->total = pkt_len; otx2_sqe_tso_add_sg(sq, &list, &offset); /* DMA mappings and skb needs to be freed only after last * TSO segment is transmitted out. So set 'PNC' only for * last segment. Also point last segment's sqe_id to first * segment's SQE index where skb address and DMA mappings * are saved. */ if (!tcp_data) { sqe_hdr->pnc = 1; sqe_hdr->sqe_id = first_sqe; sq->sg[first_sqe].skb = (u64)skb; } else { sqe_hdr->pnc = 0; } sqe_hdr->sizem1 = (offset / 16) - 1; /* Flush SQE to HW */ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); } } static bool is_hw_tso_supported(struct otx2_nic *pfvf, struct sk_buff *skb) { int payload_len, last_seg_size; if (test_bit(HW_TSO, &pfvf->hw.cap_flag)) return true; /* On 96xx A0, HW TSO not supported */ if (!is_96xx_B0(pfvf->pdev)) return false; /* HW has an issue due to which when the payload of the last LSO * segment is shorter than 16 bytes, some header fields may not * be correctly modified, hence don't offload such TSO segments. 
*/ payload_len = skb->len - skb_tcp_all_headers(skb); last_seg_size = payload_len % skb_shinfo(skb)->gso_size; if (last_seg_size && last_seg_size < 16) return false; return true; } static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb) { if (!skb_shinfo(skb)->gso_size) return 1; /* HW TSO */ if (is_hw_tso_supported(pfvf, skb)) return 1; /* SW TSO */ return skb_shinfo(skb)->gso_segs; } static bool otx2_validate_network_transport(struct sk_buff *skb) { if ((ip_hdr(skb)->protocol == IPPROTO_UDP) || (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) { struct udphdr *udph = udp_hdr(skb); if (udph->source == htons(PTP_PORT) && udph->dest == htons(PTP_PORT)) return true; } return false; } static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, bool *udp_csum_crt) { struct ethhdr *eth = (struct ethhdr *)(skb->data); u16 nix_offload_hlen = 0, inner_vhlen = 0; bool udp_hdr_present = false, is_sync; u8 *data = skb->data, *msgtype; __be16 proto = eth->h_proto; int network_depth = 0; /* NIX is programmed to offload outer VLAN header * in case of single vlan protocol field holds Network header ETH_IP/V6 * in case of stacked vlan protocol field holds Inner vlan (8100) */ if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX && skb->dev->features & NETIF_F_HW_VLAN_STAG_TX) { if (skb->vlan_proto == htons(ETH_P_8021AD)) { /* Get vlan protocol */ proto = __vlan_get_protocol(skb, eth->h_proto, NULL); /* SKB APIs like skb_transport_offset does not include * offloaded vlan header length. Need to explicitly add * the length */ nix_offload_hlen = VLAN_HLEN; inner_vhlen = VLAN_HLEN; } else if (skb->vlan_proto == htons(ETH_P_8021Q)) { nix_offload_hlen = VLAN_HLEN; } } else if (eth_type_vlan(eth->h_proto)) { proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth); } switch (ntohs(proto)) { case ETH_P_1588: if (network_depth) *offset = network_depth; else *offset = ETH_HLEN + nix_offload_hlen + inner_vhlen; break; case ETH_P_IP: case ETH_P_IPV6: if (!otx2_validate_network_transport(skb)) return false; *offset = nix_offload_hlen + skb_transport_offset(skb) + sizeof(struct udphdr); udp_hdr_present = true; } msgtype = data + *offset; /* Check PTP messageId is SYNC or not */ is_sync = !(*msgtype & 0xf); if (is_sync) *udp_csum_crt = udp_hdr_present; else *offset = 0; return is_sync; } static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb, struct otx2_snd_queue *sq, int *offset) { struct ethhdr *eth = (struct ethhdr *)(skb->data); struct ptpv2_tstamp *origin_tstamp; bool udp_csum_crt = false; unsigned int udphoff; struct timespec64 ts; int ptp_offset = 0; __wsum skb_csum; u64 iova; if (unlikely(!skb_shinfo(skb)->gso_size && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) { if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC && otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum_crt))) { origin_tstamp = (struct ptpv2_tstamp *) ((u8 *)skb->data + ptp_offset + PTP_SYNC_SEC_OFFSET); ts = ns_to_timespec64(pfvf->ptp->tstamp); origin_tstamp->seconds_msb = htons((ts.tv_sec >> 32) & 0xffff); origin_tstamp->seconds_lsb = htonl(ts.tv_sec & 0xffffffff); origin_tstamp->nanoseconds = htonl(ts.tv_nsec); /* Point to correction field in PTP packet */ ptp_offset += 8; /* When user disables hw checksum, stack calculates the csum, * but it does not cover ptp timestamp which is added later. * Recalculate the checksum manually considering the timestamp. 
*/ if (udp_csum_crt) { struct udphdr *uh = udp_hdr(skb); if (skb->ip_summed != CHECKSUM_PARTIAL && uh->check != 0) { udphoff = skb_transport_offset(skb); uh->check = 0; skb_csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); if (ntohs(eth->h_proto) == ETH_P_IPV6) uh->check = csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, skb->len - udphoff, ipv6_hdr(skb)->nexthdr, skb_csum); else uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, skb->len - udphoff, IPPROTO_UDP, skb_csum); } } } else { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; } iova = sq->timestamps->iova + (sq->head * sizeof(u64)); otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova, ptp_offset, pfvf->ptp->base_ns, udp_csum_crt); } else { skb_tx_timestamp(skb); } } bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, struct sk_buff *skb, u16 qidx) { struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx); struct otx2_nic *pfvf = netdev_priv(netdev); int offset, num_segs, free_desc; struct nix_sqe_hdr_s *sqe_hdr; /* Check if there is enough room between producer * and consumer index. */ free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1); if (free_desc < sq->sqe_thresh) return false; if (free_desc < otx2_get_sqe_count(pfvf, skb)) return false; num_segs = skb_shinfo(skb)->nr_frags + 1; /* If SKB doesn't fit in a single SQE, linearize it. * TODO: Consider adding JUMP descriptor instead. */ if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) { if (__skb_linearize(skb)) { dev_kfree_skb_any(skb); return true; } num_segs = skb_shinfo(skb)->nr_frags + 1; } if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) { /* Insert vlan tag before giving pkt to tso */ if (skb_vlan_tag_present(skb)) skb = __vlan_hwaccel_push_inside(skb); otx2_sq_append_tso(pfvf, sq, skb, qidx); return true; } /* Set SQE's SEND_HDR. * Do not clear the first 64bit as it contains constant info. 
*/ memset(sq->sqe_base + 8, 0, sq->sqe_size - 8); sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base); otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx); offset = sizeof(*sqe_hdr); /* Add extended header if needed */ otx2_sqe_add_ext(pfvf, sq, skb, &offset); /* Add SG subdesc with data frags */ if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) { otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]); return false; } otx2_set_txtstamp(pfvf, skb, sq, &offset); sqe_hdr->sizem1 = (offset / 16) - 1; netdev_tx_sent_queue(txq, skb->len); /* Flush SQE to HW */ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); return true; } EXPORT_SYMBOL(otx2_sq_append_skb); void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx) { struct nix_cqe_rx_s *cqe; struct otx2_pool *pool; int processed_cqe = 0; u16 pool_id; u64 iova; if (pfvf->xdp_prog) xdp_rxq_info_unreg(&cq->xdp_rxq); if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) return; pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx); pool = &pfvf->qset.pool[pool_id]; while (cq->pend_cqe) { cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq); processed_cqe++; cq->pend_cqe--; if (!cqe) continue; if (cqe->sg.segs > 1) { otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx); continue; } iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM; otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize); } /* Free CQEs to HW */ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, ((u64)cq->cq_idx << 32) | processed_cqe); } void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) { struct sk_buff *skb = NULL; struct otx2_snd_queue *sq; struct nix_cqe_tx_s *cqe; int processed_cqe = 0; struct sg_list *sg; int qidx; qidx = cq->cq_idx - pfvf->hw.rx_queues; sq = &pfvf->qset.sq[qidx]; if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) return; while (cq->pend_cqe) { cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq); processed_cqe++; cq->pend_cqe--; if (!cqe) continue; sg = &sq->sg[cqe->comp.sqe_id]; skb = (struct sk_buff *)sg->skb; if (skb) { otx2_dma_unmap_skb_frags(pfvf, sg); dev_kfree_skb_any(skb); sg->skb = (u64)NULL; } } /* Free CQEs to HW */ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, ((u64)cq->cq_idx << 32) | processed_cqe); } int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable) { struct msg_req *msg; int err; mutex_lock(&pfvf->mbox.lock); if (enable) msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox); else msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox); if (!msg) { mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } err = otx2_sync_mbox_msg(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock); return err; } static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr, int len, int *offset) { struct nix_sqe_sg_s *sg = NULL; u64 *iova = NULL; sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset); sg->ld_type = NIX_SEND_LDTYPE_LDD; sg->subdc = NIX_SUBDC_SG; sg->segs = 1; sg->seg1_size = len; iova = (void *)sg + sizeof(*sg); *iova = dma_addr; *offset += sizeof(*sg) + sizeof(u64); sq->sg[sq->head].dma_addr[0] = dma_addr; sq->sg[sq->head].size[0] = len; sq->sg[sq->head].num_segs = 1; } bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx) { struct nix_sqe_hdr_s *sqe_hdr; struct otx2_snd_queue *sq; int offset, free_sqe; sq = &pfvf->qset.sq[qidx]; free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb; if (free_sqe < sq->sqe_thresh) return false; memset(sq->sqe_base + 8, 0, sq->sqe_size - 8); sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base); if (!sqe_hdr->total) { sqe_hdr->aura = sq->aura_id; sqe_hdr->df = 1; sqe_hdr->sq = qidx; 
sqe_hdr->pnc = 1; } sqe_hdr->total = len; sqe_hdr->sqe_id = sq->head; offset = sizeof(*sqe_hdr); otx2_xdp_sqe_add_sg(sq, iova, len, &offset); sqe_hdr->sizem1 = (offset / 16) - 1; pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); return true; } static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, struct bpf_prog *prog, struct nix_cqe_rx_s *cqe, struct otx2_cq_queue *cq, bool *need_xdp_flush) { unsigned char *hard_start, *data; int qidx = cq->cq_idx; struct xdp_buff xdp; struct page *page; u64 iova, pa; u32 act; int err; iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM; pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); page = virt_to_page(phys_to_virt(pa)); xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq); data = (unsigned char *)phys_to_virt(pa); hard_start = page_address(page); xdp_prepare_buff(&xdp, hard_start, data - hard_start, cqe->sg.seg_size, false); act = bpf_prog_run_xdp(prog, &xdp); switch (act) { case XDP_PASS: break; case XDP_TX: qidx += pfvf->hw.tx_queues; cq->pool_ptrs++; return otx2_xdp_sq_append_pkt(pfvf, iova, cqe->sg.seg_size, qidx); case XDP_REDIRECT: cq->pool_ptrs++; err = xdp_do_redirect(pfvf->netdev, &xdp, prog); otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE); if (!err) { *need_xdp_flush = true; return true; } put_page(page); break; default: bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act); break; case XDP_ABORTED: trace_xdp_exception(pfvf->netdev, prog, act); break; case XDP_DROP: otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE); put_page(page); cq->pool_ptrs++; return true; } return false; }
linux-master
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
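The send-queue accounting in otx2_sq_append_skb() and otx2_sqe_flush() above relies on sq->sqe_cnt being a power of two, so the producer index (sq->head) and consumer index (sq->cons_head) wrap with "& (sqe_cnt - 1)" and the free-descriptor count is computed with the same mask. The following standalone sketch (illustrative "toy_" names and test harness, not driver code) reproduces that arithmetic so the wrap-around behaviour is easy to verify in isolation.

/* Minimal sketch of the power-of-two ring accounting used by the otx2
 * send queue: head is the producer index, cons_head the consumer index,
 * and one slot stays unused so a full ring is distinguishable from an
 * empty one. Names and the main() harness are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_sq {
	uint16_t head;      /* next SQE slot to be filled (producer) */
	uint16_t cons_head; /* next SQE slot to be completed (consumer) */
	uint16_t sqe_cnt;   /* ring size, must be a power of two */
};

/* Same expression as the driver's free_desc computation: correct across
 * wrap-around because the subtraction is reduced modulo sqe_cnt by the mask.
 */
static uint16_t toy_sq_free_desc(const struct toy_sq *sq)
{
	return (sq->cons_head - sq->head - 1 + sq->sqe_cnt) &
	       (sq->sqe_cnt - 1);
}

static void toy_sq_produce(struct toy_sq *sq)
{
	sq->head = (sq->head + 1) & (sq->sqe_cnt - 1);
}

static void toy_sq_consume(struct toy_sq *sq)
{
	sq->cons_head = (sq->cons_head + 1) & (sq->sqe_cnt - 1);
}

int main(void)
{
	struct toy_sq sq = { .head = 0, .cons_head = 0, .sqe_cnt = 16 };
	int i;

	/* Empty ring: 15 usable slots (one kept free by design) */
	printf("empty ring: %u free\n", (unsigned)toy_sq_free_desc(&sq));

	for (i = 0; i < 10; i++)
		toy_sq_produce(&sq);
	printf("after 10 sends: %u free\n", (unsigned)toy_sq_free_desc(&sq));

	for (i = 0; i < 10; i++)
		toy_sq_consume(&sq);
	printf("after 10 completions: %u free\n",
	       (unsigned)toy_sq_free_desc(&sq));

	return 0;
}

In the driver itself this free count is then compared against sq->sqe_thresh and otx2_get_sqe_count() before any descriptors are framed, which is why the keep-one-slot-free convention matters: it lets a full ring be reported as 0 free rather than aliasing with an empty one.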
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Admin Function driver * * Copyright (C) 2021 Marvell. * */ #include <linux/pci.h> #include "rvu.h" /* SDP PF device id */ #define PCI_DEVID_OTX2_SDP_PF 0xA0F6 /* Maximum SDP blocks in a chip */ #define MAX_SDP 2 /* SDP PF number */ static int sdp_pf_num[MAX_SDP] = {-1, -1}; bool is_sdp_pfvf(u16 pcifunc) { u16 pf = rvu_get_pf(pcifunc); u32 found = 0, i = 0; while (i < MAX_SDP) { if (pf == sdp_pf_num[i]) found = 1; i++; } if (!found) return false; return true; } bool is_sdp_pf(u16 pcifunc) { return (is_sdp_pfvf(pcifunc) && !(pcifunc & RVU_PFVF_FUNC_MASK)); } bool is_sdp_vf(u16 pcifunc) { return (is_sdp_pfvf(pcifunc) && !!(pcifunc & RVU_PFVF_FUNC_MASK)); } int rvu_sdp_init(struct rvu *rvu) { struct pci_dev *pdev = NULL; struct rvu_pfvf *pfvf; u32 i = 0; while ((i < MAX_SDP) && (pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OTX2_SDP_PF, pdev)) != NULL) { /* The RVU PF number is one less than bus number */ sdp_pf_num[i] = pdev->bus->number - 1; pfvf = &rvu->pf[sdp_pf_num[i]]; pfvf->sdp_info = devm_kzalloc(rvu->dev, sizeof(struct sdp_node_info), GFP_KERNEL); if (!pfvf->sdp_info) { pci_dev_put(pdev); return -ENOMEM; } dev_info(rvu->dev, "SDP PF number:%d\n", sdp_pf_num[i]); i++; } pci_dev_put(pdev); return 0; } int rvu_mbox_handler_set_sdp_chan_info(struct rvu *rvu, struct sdp_chan_info_msg *req, struct msg_rsp *rsp) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); memcpy(pfvf->sdp_info, &req->info, sizeof(struct sdp_node_info)); dev_info(rvu->dev, "AF: SDP%d max_vfs %d num_pf_rings %d pf_srn %d\n", req->info.node_id, req->info.max_vfs, req->info.num_pf_rings, req->info.pf_srn); return 0; } int rvu_mbox_handler_get_sdp_chan_info(struct rvu *rvu, struct msg_req *req, struct sdp_get_chan_info_msg *rsp) { struct rvu_hwinfo *hw = rvu->hw; int blkaddr; if (!hw->cap.programmable_chans) { rsp->chan_base = NIX_CHAN_SDP_CH_START; rsp->num_chan = NIX_CHAN_SDP_NUM_CHANS; } else { blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); rsp->chan_base = hw->sdp_chan_base; rsp->num_chan = rvu_read64(rvu, blkaddr, NIX_AF_CONST1) & 0xFFFUL; } return 0; }
linux-master
drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
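The SDP helpers above classify a pcifunc as SDP PF or SDP VF by first matching its PF id against the sdp_pf_num[] table that rvu_sdp_init() fills from probed SDP PCI devices, then checking whether any function bits are set. The sketch below mirrors that split with hypothetical "TOY_" masks; the bit layout (PF id above the ten low function bits, VF encoded as a non-zero function id) is the usual RVU convention restated here as an assumption, since the real masks live in rvu.h rather than in this file.

/* Illustrative re-statement of the is_sdp_pfvf()/is_sdp_pf()/is_sdp_vf()
 * logic. All names, masks and the sample table are assumptions for the
 * sake of a self-contained example, not values taken from this file.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PFVF_PF_SHIFT   10     /* assumed PF field position */
#define TOY_PFVF_PF_MASK    0x3F   /* assumed PF field width */
#define TOY_PFVF_FUNC_MASK  0x3FF  /* assumed function/VF field */

static uint16_t toy_get_pf(uint16_t pcifunc)
{
	return (pcifunc >> TOY_PFVF_PF_SHIFT) & TOY_PFVF_PF_MASK;
}

/* Hypothetical stand-in for sdp_pf_num[], as if one SDP PF (PF4) was found */
static int toy_sdp_pf_num[2] = { 4, -1 };

static bool toy_is_sdp_pfvf(uint16_t pcifunc)
{
	uint16_t pf = toy_get_pf(pcifunc);
	unsigned int i;

	for (i = 0; i < 2; i++)
		if (pf == toy_sdp_pf_num[i])
			return true;
	return false;
}

static bool toy_is_sdp_pf(uint16_t pcifunc)
{
	/* A PF carries no function bits */
	return toy_is_sdp_pfvf(pcifunc) && !(pcifunc & TOY_PFVF_FUNC_MASK);
}

static bool toy_is_sdp_vf(uint16_t pcifunc)
{
	/* A VF carries a non-zero function id */
	return toy_is_sdp_pfvf(pcifunc) && !!(pcifunc & TOY_PFVF_FUNC_MASK);
}

int main(void)
{
	uint16_t sdp_pf  = 4 << TOY_PFVF_PF_SHIFT;       /* PF4, func 0 */
	uint16_t sdp_vf0 = (4 << TOY_PFVF_PF_SHIFT) | 1; /* PF4, first VF */

	printf("PF4: is_sdp_pf=%d is_sdp_vf=%d\n",
	       toy_is_sdp_pf(sdp_pf), toy_is_sdp_vf(sdp_pf));
	printf("VF:  is_sdp_pf=%d is_sdp_vf=%d\n",
	       toy_is_sdp_pf(sdp_vf0), toy_is_sdp_vf(sdp_vf0));
	return 0;
}

The same pattern explains why rvu_sdp_init() only records PF numbers (derived from the PCI bus number, as the comment in the file notes): every VF under those PFs is then recognised by the PF match alone, with the function bits deciding PF versus VF.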
// SPDX-License-Identifier: GPL-2.0 /* Marvell OcteonTx2 CGX driver * * Copyright (C) 2018 Marvell. * */ #include <linux/acpi.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/phy.h> #include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include "cgx.h" #include "rvu.h" #include "lmac_common.h" #define DRV_NAME "Marvell-CGX/RPM" #define DRV_STRING "Marvell CGX/RPM Driver" static LIST_HEAD(cgx_list); /* Convert firmware speed encoding to user format(Mbps) */ static const u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX] = { [CGX_LINK_NONE] = 0, [CGX_LINK_10M] = 10, [CGX_LINK_100M] = 100, [CGX_LINK_1G] = 1000, [CGX_LINK_2HG] = 2500, [CGX_LINK_5G] = 5000, [CGX_LINK_10G] = 10000, [CGX_LINK_20G] = 20000, [CGX_LINK_25G] = 25000, [CGX_LINK_40G] = 40000, [CGX_LINK_50G] = 50000, [CGX_LINK_80G] = 80000, [CGX_LINK_100G] = 100000, }; /* Convert firmware lmac type encoding to string */ static const char *cgx_lmactype_string[LMAC_MODE_MAX] = { [LMAC_MODE_SGMII] = "SGMII", [LMAC_MODE_XAUI] = "XAUI", [LMAC_MODE_RXAUI] = "RXAUI", [LMAC_MODE_10G_R] = "10G_R", [LMAC_MODE_40G_R] = "40G_R", [LMAC_MODE_QSGMII] = "QSGMII", [LMAC_MODE_25G_R] = "25G_R", [LMAC_MODE_50G_R] = "50G_R", [LMAC_MODE_100G_R] = "100G_R", [LMAC_MODE_USXGMII] = "USXGMII", [LMAC_MODE_USGMII] = "USGMII", }; /* CGX PHY management internal APIs */ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en); /* Supported devices */ static const struct pci_device_id cgx_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) }, { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) }, { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) }, { 0, } /* end of table */ }; MODULE_DEVICE_TABLE(pci, cgx_id_table); static bool is_dev_rpm(void *cgxd) { struct cgx *cgx = cgxd; return (cgx->pdev->device == PCI_DEVID_CN10K_RPM) || (cgx->pdev->device == PCI_DEVID_CN10KB_RPM); } bool is_lmac_valid(struct cgx *cgx, int lmac_id) { if (!cgx || lmac_id < 0 || lmac_id >= cgx->max_lmac_per_mac) return false; return test_bit(lmac_id, &cgx->lmac_bmap); } /* Helper function to get sequential index * given the enabled LMAC of a CGX */ static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id) { int tmp, id = 0; for_each_set_bit(tmp, &cgx->lmac_bmap, cgx->max_lmac_per_mac) { if (tmp == lmac_id) break; id++; } return id; } struct mac_ops *get_mac_ops(void *cgxd) { if (!cgxd) return cgxd; return ((struct cgx *)cgxd)->mac_ops; } void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val) { writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) + offset); } u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset) { return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) + offset); } struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx) { if (!cgx || lmac_id >= cgx->max_lmac_per_mac) return NULL; return cgx->lmac_idmap[lmac_id]; } int cgx_get_cgxcnt_max(void) { struct cgx *cgx_dev; int idmax = -ENODEV; list_for_each_entry(cgx_dev, &cgx_list, cgx_list) if (cgx_dev->cgx_id > idmax) idmax = cgx_dev->cgx_id; if (idmax < 0) return 0; return idmax + 1; } int cgx_get_lmac_cnt(void *cgxd) { struct cgx *cgx = cgxd; if (!cgx) return -ENODEV; return cgx->lmac_count; } void *cgx_get_pdata(int cgx_id) { struct cgx *cgx_dev; list_for_each_entry(cgx_dev, &cgx_list, cgx_list) { if (cgx_dev->cgx_id == cgx_id) return cgx_dev; } return NULL; } void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, 
u64 val) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); /* Software must not access disabled LMAC registers */ if (!is_lmac_valid(cgx_dev, lmac_id)) return; cgx_write(cgx_dev, lmac_id, offset, val); } u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); /* Software must not access disabled LMAC registers */ if (!is_lmac_valid(cgx_dev, lmac_id)) return 0; return cgx_read(cgx_dev, lmac_id, offset); } int cgx_get_cgxid(void *cgxd) { struct cgx *cgx = cgxd; if (!cgx) return -EINVAL; return cgx->cgx_id; } u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); u64 cfg; cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG); return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT; } /* Ensure the required lock for event queue(where asynchronous events are * posted) is acquired before calling this API. Else an asynchronous event(with * latest link status) can reach the destination before this function returns * and could make the link status appear wrong. */ int cgx_get_link_info(void *cgxd, int lmac_id, struct cgx_link_user_info *linfo) { struct lmac *lmac = lmac_pdata(lmac_id, cgxd); if (!lmac) return -ENODEV; *linfo = lmac->link_info; return 0; } int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); struct mac_ops *mac_ops; int index, id; u64 cfg; if (!lmac) return -ENODEV; /* access mac_ops to know csr_offset */ mac_ops = cgx_dev->mac_ops; /* copy 6bytes from macaddr */ /* memcpy(&cfg, mac_addr, 6); */ cfg = ether_addr_to_u64(mac_addr); id = get_sequence_id_of_lmac(cgx_dev, lmac_id); index = id * lmac->mac_to_index_bmap.max; cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49)); cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE); cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); return 0; } u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id) { struct mac_ops *mac_ops; struct cgx *cgx = cgxd; if (!cgxd || !is_lmac_valid(cgxd, lmac_id)) return 0; cgx = cgxd; /* Get mac_ops to know csr offset */ mac_ops = cgx->mac_ops; return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); } u64 cgx_read_dmac_entry(void *cgxd, int index) { struct mac_ops *mac_ops; struct cgx *cgx; if (!cgxd) return 0; cgx = cgxd; mac_ops = cgx->mac_ops; return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8))); } int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); struct mac_ops *mac_ops; int index, idx; u64 cfg = 0; int id; if (!lmac) return -ENODEV; mac_ops = cgx_dev->mac_ops; /* Get available index where entry is to be installed */ idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap); if (idx < 0) return idx; id = get_sequence_id_of_lmac(cgx_dev, lmac_id); index = id * lmac->mac_to_index_bmap.max + idx; cfg = ether_addr_to_u64(mac_addr); cfg |= CGX_DMAC_CAM_ADDR_ENABLE; cfg |= ((u64)lmac_id << 49); cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg); cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT); if (is_multicast_ether_addr(mac_addr)) { cfg &= ~GENMASK_ULL(2, 1); cfg |= CGX_DMAC_MCAST_MODE_CAM; lmac->mcast_filters_count++; } else if (!lmac->mcast_filters_count) { cfg |= CGX_DMAC_MCAST_MODE; } cgx_write(cgx_dev, lmac_id, 
CGXX_CMRX_RX_DMAC_CTL0, cfg); return idx; } int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); struct mac_ops *mac_ops; u8 index = 0, id; u64 cfg; if (!lmac) return -ENODEV; mac_ops = cgx_dev->mac_ops; /* Restore index 0 to its default init value as done during * cgx_lmac_init */ set_bit(0, lmac->mac_to_index_bmap.bmap); id = get_sequence_id_of_lmac(cgx_dev, lmac_id); index = id * lmac->mac_to_index_bmap.max + index; cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0); /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); cfg &= ~CGX_DMAC_CAM_ACCEPT; cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE); cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); return 0; } /* Allows caller to change macaddress associated with index * in dmac filter table including index 0 reserved for * interface mac address */ int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); struct mac_ops *mac_ops; struct lmac *lmac; u64 cfg; int id; lmac = lmac_pdata(lmac_id, cgx_dev); if (!lmac) return -ENODEV; mac_ops = cgx_dev->mac_ops; /* Validate the index */ if (index >= lmac->mac_to_index_bmap.max) return -EINVAL; /* ensure index is already set */ if (!test_bit(index, lmac->mac_to_index_bmap.bmap)) return -EINVAL; id = get_sequence_id_of_lmac(cgx_dev, lmac_id); index = id * lmac->mac_to_index_bmap.max + index; cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8))); cfg &= ~CGX_RX_DMAC_ADR_MASK; cfg |= ether_addr_to_u64(mac_addr); cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg); return 0; } int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); struct mac_ops *mac_ops; u8 mac[ETH_ALEN]; u64 cfg; int id; if (!lmac) return -ENODEV; mac_ops = cgx_dev->mac_ops; /* Validate the index */ if (index >= lmac->mac_to_index_bmap.max) return -EINVAL; /* Skip deletion for reserved index i.e. 
index 0 */ if (index == 0) return 0; rvu_free_rsrc(&lmac->mac_to_index_bmap, index); id = get_sequence_id_of_lmac(cgx_dev, lmac_id); index = id * lmac->mac_to_index_bmap.max + index; /* Read MAC address to check whether it is ucast or mcast */ cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8))); u64_to_ether_addr(cfg, mac); if (is_multicast_ether_addr(mac)) lmac->mcast_filters_count--; if (!lmac->mcast_filters_count) { cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); cfg &= ~GENMASK_ULL(2, 1); cfg |= CGX_DMAC_MCAST_MODE; cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); } cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0); return 0; } int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); if (lmac) return lmac->mac_to_index_bmap.max; return 0; } u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); struct mac_ops *mac_ops; int index; u64 cfg; int id; mac_ops = cgx_dev->mac_ops; id = get_sequence_id_of_lmac(cgx_dev, lmac_id); index = id * lmac->mac_to_index_bmap.max; cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8); return cfg & CGX_RX_DMAC_ADR_MASK; } int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind) { struct cgx *cgx = cgxd; if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; cgx_write(cgx, lmac_id, cgx->mac_ops->rxid_map_offset, (pkind & 0x3F)); return 0; } static u8 cgx_get_lmac_type(void *cgxd, int lmac_id) { struct cgx *cgx = cgxd; u64 cfg; cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK; } static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id) { struct cgx *cgx = cgxd; u8 num_lmacs; u32 fifo_len; fifo_len = cgx->mac_ops->fifo_len; num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx); switch (num_lmacs) { case 1: return fifo_len; case 2: return fifo_len / 2; case 3: /* LMAC0 gets half of the FIFO, reset 1/4th */ if (lmac_id == 0) return fifo_len / 2; return fifo_len / 4; case 4: default: return fifo_len / 4; } return 0; } /* Configure CGX LMAC in internal loopback mode */ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable) { struct cgx *cgx = cgxd; struct lmac *lmac; u64 cfg; if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; lmac = lmac_pdata(lmac_id, cgx); if (lmac->lmac_type == LMAC_MODE_SGMII || lmac->lmac_type == LMAC_MODE_QSGMII) { cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL); if (enable) cfg |= CGXX_GMP_PCS_MRX_CTL_LBK; else cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK; cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg); } else { cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1); if (enable) cfg |= CGXX_SPUX_CONTROL1_LBK; else cfg &= ~CGXX_SPUX_CONTROL1_LBK; cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg); } return 0; } void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable) { struct cgx *cgx = cgx_get_pdata(cgx_id); struct lmac *lmac = lmac_pdata(lmac_id, cgx); struct mac_ops *mac_ops; u16 max_dmac; int index, i; u64 cfg = 0; int id; if (!cgx || !lmac) return; max_dmac = lmac->mac_to_index_bmap.max; id = get_sequence_id_of_lmac(cgx, lmac_id); mac_ops = cgx->mac_ops; if (enable) { /* Enable promiscuous mode on LMAC */ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); cfg &= ~CGX_DMAC_CAM_ACCEPT; cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE); cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); for (i = 0; i < max_dmac; i++) { index = id * max_dmac + 
i; cfg = cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8)); cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE; cgx_write(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg); } } else { /* Disable promiscuous mode */ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE; cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); for (i = 0; i < max_dmac; i++) { index = id * max_dmac + i; cfg = cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8)); if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) { cfg |= CGX_DMAC_CAM_ADDR_ENABLE; cgx_write(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg); } } } } static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id, u8 *tx_pause, u8 *rx_pause) { struct cgx *cgx = cgxd; u64 cfg; if (is_dev_rpm(cgx)) return 0; if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK); cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL); *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV); return 0; } /* Enable or disable forwarding received pause frames to Tx block */ void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable) { struct cgx *cgx = cgxd; u8 rx_pause, tx_pause; bool is_pfc_enabled; struct lmac *lmac; u64 cfg; if (!cgx) return; lmac = lmac_pdata(lmac_id, cgx); if (!lmac) return; /* Pause frames are not enabled just return */ if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) return; cgx_lmac_get_pause_frm_status(cgx, lmac_id, &rx_pause, &tx_pause); is_pfc_enabled = rx_pause ? false : true; if (enable) { if (!is_pfc_enabled) { cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK; cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK; cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); } else { cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL); cfg |= CGXX_SMUX_CBFC_CTL_BCK_EN; cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg); } } else { if (!is_pfc_enabled) { cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK; cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK; cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); } else { cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL); cfg &= ~CGXX_SMUX_CBFC_CTL_BCK_EN; cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg); } } } int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat) { struct cgx *cgx = cgxd; if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8)); return 0; } int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat) { struct cgx *cgx = cgxd; if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; *tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8)); return 0; } u64 cgx_features_get(void *cgxd) { return ((struct cgx *)cgxd)->hw_features; } static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo) { if (!linfo->fec) return 0; switch (linfo->lmac_type_id) { case LMAC_MODE_SGMII: case LMAC_MODE_XAUI: case LMAC_MODE_RXAUI: case LMAC_MODE_QSGMII: return 0; case LMAC_MODE_10G_R: case LMAC_MODE_25G_R: case LMAC_MODE_100G_R: case LMAC_MODE_USXGMII: return 1; case LMAC_MODE_40G_R: return 4; case LMAC_MODE_50G_R: if (linfo->fec == OTX2_FEC_BASER) return 2; else return 1; default: return 0; } } int 
cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp) { int stats, fec_stats_count = 0; int corr_reg, uncorr_reg; struct cgx *cgx = cgxd; if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE) return 0; fec_stats_count = cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info); if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) { corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS; uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS; } else { corr_reg = CGXX_SPUX_RSFEC_CORR; uncorr_reg = CGXX_SPUX_RSFEC_UNCORR; } for (stats = 0; stats < fec_stats_count; stats++) { rsp->fec_corr_blks += cgx_read(cgx, lmac_id, corr_reg + (stats * 8)); rsp->fec_uncorr_blks += cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8)); } return 0; } int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable) { struct cgx *cgx = cgxd; u64 cfg; if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); if (enable) cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN; else cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN); cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg); return 0; } int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable) { struct cgx *cgx = cgxd; u64 cfg, last; if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); last = cfg; if (enable) cfg |= DATA_PKT_TX_EN; else cfg &= ~DATA_PKT_TX_EN; if (cfg != last) cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg); return !!(last & DATA_PKT_TX_EN); } static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause) { struct cgx *cgx = cgxd; u64 cfg; if (is_dev_rpm(cgx)) return 0; if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK; cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0; cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL); cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV; cfg |= tx_pause ? 
CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0; cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg); cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP); if (tx_pause) { cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id); } else { cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id); cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id); } cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg); return 0; } static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable) { struct cgx *cgx = cgxd; u64 cfg; if (!is_lmac_valid(cgx, lmac_id)) return; if (enable) { /* Set pause time and interval */ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME, DEFAULT_PAUSE_TIME); cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL); cfg &= ~0xFFFFULL; cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL, cfg | (DEFAULT_PAUSE_TIME / 2)); cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME, DEFAULT_PAUSE_TIME); cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL); cfg &= ~0xFFFFULL; cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL, cfg | (DEFAULT_PAUSE_TIME / 2)); } /* ALL pause frames received are completely ignored */ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK; cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK; cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); /* Disable pause frames transmission */ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL); cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV; cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg); cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP); cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id); cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id); cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg); /* Disable all PFC classes by default */ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL); cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg); cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg); } int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause, int pfvf_idx) { struct cgx *cgx = cgxd; struct lmac *lmac; lmac = lmac_pdata(lmac_id, cgx); if (!lmac) return -ENODEV; if (!rx_pause) clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap); else set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap); if (!tx_pause) clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap); else set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap); /* check if other pfvfs are using flow control */ if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) { dev_warn(&cgx->pdev->dev, "Receive Flow control disable not permitted as its used by other PFVFs\n"); return -EPERM; } if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) { dev_warn(&cgx->pdev->dev, "Transmit Flow control disable not permitted as its used by other PFVFs\n"); return -EPERM; } return 0; } int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en) { struct cgx *cgx = cgxd; u64 cfg; if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; /* Return as no traffic classes are requested */ if (tx_pause && !pfc_en) return 0; cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL); pfc_en |= FIELD_GET(CGX_PFC_CLASS_MASK, cfg); if (rx_pause) { cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN | CGXX_SMUX_CBFC_CTL_BCK_EN | CGXX_SMUX_CBFC_CTL_DRP_EN); } else { cfg &= ~(CGXX_SMUX_CBFC_CTL_RX_EN | CGXX_SMUX_CBFC_CTL_BCK_EN | CGXX_SMUX_CBFC_CTL_DRP_EN); } if (tx_pause) { cfg |= CGXX_SMUX_CBFC_CTL_TX_EN; cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg); } else { cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN; cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, 
cfg); } cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg); /* Write source MAC address which will be filled into PFC packet */ cfg = cgx_lmac_addr_get(cgx->cgx_id, lmac_id); cgx_write(cgx, lmac_id, CGXX_SMUX_SMAC, cfg); return 0; } int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause, u8 *rx_pause) { struct cgx *cgx = cgxd; u64 cfg; if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL); *rx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_RX_EN); *tx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_TX_EN); return 0; } void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable) { struct cgx *cgx = cgxd; u64 cfg; if (!cgx) return; if (enable) { /* Enable inbound PTP timestamping */ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE; cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE; cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); } else { /* Disable inbound PTP stamping */ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE; cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE; cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); } } /* CGX Firmware interface low level support */ int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac) { struct cgx *cgx = lmac->cgx; struct device *dev; int err = 0; u64 cmd; /* Ensure no other command is in progress */ err = mutex_lock_interruptible(&lmac->cmd_lock); if (err) return err; /* Ensure command register is free */ cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG); if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) { err = -EBUSY; goto unlock; } /* Update ownership in command request */ req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req); /* Mark this lmac as pending, before we start */ lmac->cmd_pend = true; /* Start command in hardware */ cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req); /* Ensure command is completed without errors */ if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend, msecs_to_jiffies(CGX_CMD_TIMEOUT))) { dev = &cgx->pdev->dev; dev_err(dev, "cgx port %d:%d cmd %lld timeout\n", cgx->cgx_id, lmac->lmac_id, FIELD_GET(CMDREG_ID, req)); err = LMAC_AF_ERR_CMD_TIMEOUT; goto unlock; } /* we have a valid command response */ smp_rmb(); /* Ensure the latest updates are visible */ *resp = lmac->resp; unlock: mutex_unlock(&lmac->cmd_lock); return err; } int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id) { struct lmac *lmac; int err; lmac = lmac_pdata(lmac_id, cgx); if (!lmac) return -ENODEV; err = cgx_fwi_cmd_send(req, resp, lmac); /* Check for valid response */ if (!err) { if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL) return -EIO; else return 0; } return err; } static int cgx_link_usertable_index_map(int speed) { switch (speed) { case SPEED_10: return CGX_LINK_10M; case SPEED_100: return CGX_LINK_100M; case SPEED_1000: return CGX_LINK_1G; case SPEED_2500: return CGX_LINK_2HG; case SPEED_5000: return CGX_LINK_5G; case SPEED_10000: return CGX_LINK_10G; case SPEED_20000: return CGX_LINK_20G; case SPEED_25000: return CGX_LINK_25G; case SPEED_40000: return CGX_LINK_40G; case SPEED_50000: return CGX_LINK_50G; case 80000: return CGX_LINK_80G; case SPEED_100000: return CGX_LINK_100G; case SPEED_UNKNOWN: return CGX_LINK_NONE; } return CGX_LINK_NONE; } static void set_mod_args(struct 
cgx_set_link_mode_args *args, u32 speed, u8 duplex, u8 autoneg, u64 mode) { /* Fill default values incase of user did not pass * valid parameters */ if (args->duplex == DUPLEX_UNKNOWN) args->duplex = duplex; if (args->speed == SPEED_UNKNOWN) args->speed = speed; if (args->an == AUTONEG_UNKNOWN) args->an = autoneg; args->mode = mode; args->ports = 0; } static void otx2_map_ethtool_link_modes(u64 bitmask, struct cgx_set_link_mode_args *args) { switch (bitmask) { case ETHTOOL_LINK_MODE_10baseT_Half_BIT: set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII)); break; case ETHTOOL_LINK_MODE_10baseT_Full_BIT: set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII)); break; case ETHTOOL_LINK_MODE_100baseT_Half_BIT: set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII)); break; case ETHTOOL_LINK_MODE_100baseT_Full_BIT: set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII)); break; case ETHTOOL_LINK_MODE_1000baseT_Half_BIT: set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII)); break; case ETHTOOL_LINK_MODE_1000baseT_Full_BIT: set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_SGMII)); break; case ETHTOOL_LINK_MODE_1000baseX_Full_BIT: set_mod_args(args, 1000, 0, 0, BIT_ULL(CGX_MODE_1000_BASEX)); break; case ETHTOOL_LINK_MODE_10000baseT_Full_BIT: set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_QSGMII)); break; case ETHTOOL_LINK_MODE_10000baseSR_Full_BIT: set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2C)); break; case ETHTOOL_LINK_MODE_10000baseLR_Full_BIT: set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2M)); break; case ETHTOOL_LINK_MODE_10000baseKR_Full_BIT: set_mod_args(args, 10000, 0, 1, BIT_ULL(CGX_MODE_10G_KR)); break; case ETHTOOL_LINK_MODE_25000baseSR_Full_BIT: set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2C)); break; case ETHTOOL_LINK_MODE_25000baseCR_Full_BIT: set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_CR)); break; case ETHTOOL_LINK_MODE_25000baseKR_Full_BIT: set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_KR)); break; case ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT: set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2C)); break; case ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT: set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2M)); break; case ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT: set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_CR4)); break; case ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT: set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_KR4)); break; case ETHTOOL_LINK_MODE_50000baseSR_Full_BIT: set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2C)); break; case ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT: set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2M)); break; case ETHTOOL_LINK_MODE_50000baseCR_Full_BIT: set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_CR)); break; case ETHTOOL_LINK_MODE_50000baseKR_Full_BIT: set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_KR)); break; case ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT: set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2C)); break; case ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT: set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2M)); break; case ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT: set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_CR4)); break; case ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT: set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_KR4)); break; default: set_mod_args(args, 0, 1, 0, BIT_ULL(CGX_MODE_MAX)); break; } } static inline void link_status_user_format(u64 lstat, struct cgx_link_user_info *linfo, struct cgx *cgx, u8 lmac_id) { const 
char *lmac_string; linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat); linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat); linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)]; linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat); linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat); linfo->lmac_type_id = FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, lstat); if (linfo->lmac_type_id >= LMAC_MODE_MAX) { dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port%d:%d", linfo->lmac_type_id, cgx->cgx_id, lmac_id); strncpy(linfo->lmac_type, "Unknown", LMACTYPE_STR_LEN - 1); return; } lmac_string = cgx_lmactype_string[linfo->lmac_type_id]; strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1); } /* Hardware event handlers */ static inline void cgx_link_change_handler(u64 lstat, struct lmac *lmac) { struct cgx_link_user_info *linfo; struct cgx *cgx = lmac->cgx; struct cgx_link_event event; struct device *dev; int err_type; dev = &cgx->pdev->dev; link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id); err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat); event.cgx_id = cgx->cgx_id; event.lmac_id = lmac->lmac_id; /* update the local copy of link status */ lmac->link_info = event.link_uinfo; linfo = &lmac->link_info; if (err_type == CGX_ERR_SPEED_CHANGE_INVALID) return; /* Ensure callback doesn't get unregistered until we finish it */ spin_lock(&lmac->event_cb_lock); if (!lmac->event_cb.notify_link_chg) { dev_dbg(dev, "cgx port %d:%d Link change handler null", cgx->cgx_id, lmac->lmac_id); if (err_type != CGX_ERR_NONE) { dev_err(dev, "cgx port %d:%d Link error %d\n", cgx->cgx_id, lmac->lmac_id, err_type); } dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n", cgx->cgx_id, lmac->lmac_id, linfo->link_up ? "UP" : "DOWN", linfo->speed); goto err; } if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data)) dev_err(dev, "event notification failure\n"); err: spin_unlock(&lmac->event_cb_lock); } static inline bool cgx_cmdresp_is_linkevent(u64 event) { u8 id; id = FIELD_GET(EVTREG_ID, event); if (id == CGX_CMD_LINK_BRING_UP || id == CGX_CMD_LINK_BRING_DOWN || id == CGX_CMD_MODE_CHANGE) return true; else return false; } static inline bool cgx_event_is_linkevent(u64 event) { if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE) return true; else return false; } static irqreturn_t cgx_fwi_event_handler(int irq, void *data) { u64 event, offset, clear_bit; struct lmac *lmac = data; struct cgx *cgx; cgx = lmac->cgx; /* Clear SW_INT for RPM and CMR_INT for CGX */ offset = cgx->mac_ops->int_register; clear_bit = cgx->mac_ops->int_ena_bit; event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG); if (!FIELD_GET(EVTREG_ACK, event)) return IRQ_NONE; switch (FIELD_GET(EVTREG_EVT_TYPE, event)) { case CGX_EVT_CMD_RESP: /* Copy the response. Since only one command is active at a * time, there is no way a response can get overwritten */ lmac->resp = event; /* Ensure response is updated before thread context starts */ smp_wmb(); /* There wont be separate events for link change initiated from * software; Hence report the command responses as events */ if (cgx_cmdresp_is_linkevent(event)) cgx_link_change_handler(event, lmac); /* Release thread waiting for completion */ lmac->cmd_pend = false; wake_up_interruptible(&lmac->wq_cmd_cmplt); break; case CGX_EVT_ASYNC: if (cgx_event_is_linkevent(event)) cgx_link_change_handler(event, lmac); break; } /* Any new event or command response will be posted by firmware * only after the current status is acked. 
* Ack the interrupt register as well. */ cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0); cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit); return IRQ_HANDLED; } /* APIs for PHY management using CGX firmware interface */ /* callback registration for hardware events like link change */ int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id) { struct cgx *cgx = cgxd; struct lmac *lmac; lmac = lmac_pdata(lmac_id, cgx); if (!lmac) return -ENODEV; lmac->event_cb = *cb; return 0; } int cgx_lmac_evh_unregister(void *cgxd, int lmac_id) { struct lmac *lmac; unsigned long flags; struct cgx *cgx = cgxd; lmac = lmac_pdata(lmac_id, cgx); if (!lmac) return -ENODEV; spin_lock_irqsave(&lmac->event_cb_lock, flags); lmac->event_cb.notify_link_chg = NULL; lmac->event_cb.data = NULL; spin_unlock_irqrestore(&lmac->event_cb_lock, flags); return 0; } int cgx_get_fwdata_base(u64 *base) { u64 req = 0, resp; struct cgx *cgx; int first_lmac; int err; cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list); if (!cgx) return -ENXIO; first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac); req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req); err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac); if (!err) *base = FIELD_GET(RESP_FWD_BASE, resp); return err; } int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args, int cgx_id, int lmac_id) { struct cgx *cgx = cgxd; u64 req = 0, resp; if (!cgx) return -ENODEV; if (args.mode) otx2_map_ethtool_link_modes(args.mode, &args); if (!args.speed && args.duplex && !args.an) return -EINVAL; req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req); req = FIELD_SET(CMDMODECHANGE_SPEED, cgx_link_usertable_index_map(args.speed), req); req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req); req = FIELD_SET(CMDMODECHANGE_AN, args.an, req); req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req); req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req); return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id); } int cgx_set_fec(u64 fec, int cgx_id, int lmac_id) { u64 req = 0, resp; struct cgx *cgx; int err = 0; cgx = cgx_get_pdata(cgx_id); if (!cgx) return -ENXIO; req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_FEC, req); req = FIELD_SET(CMDSETFEC, fec, req); err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id); if (err) return err; cgx->lmac_idmap[lmac_id]->link_info.fec = FIELD_GET(RESP_LINKSTAT_FEC, resp); return cgx->lmac_idmap[lmac_id]->link_info.fec; } int cgx_get_phy_fec_stats(void *cgxd, int lmac_id) { struct cgx *cgx = cgxd; u64 req = 0, resp; if (!cgx) return -ENODEV; req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req); return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id); } static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable) { u64 req = 0; u64 resp; if (enable) { req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req); /* On CN10K firmware offloads link bring up/down operations to ECP * On Octeontx2 link operations are handled by firmware itself * which can cause mbox errors so configure maximum time firmware * poll for Link as 1000 ms */ if (!is_dev_rpm(cgx)) req = FIELD_SET(LINKCFG_TIMEOUT, 1000, req); } else { req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req); } return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id); } static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx) { int first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac); u64 req = 0; req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req); return cgx_fwi_cmd_generic(req, resp, cgx, first_lmac); } static int 
cgx_lmac_verify_fwi_version(struct cgx *cgx) { struct device *dev = &cgx->pdev->dev; int major_ver, minor_ver; u64 resp; int err; if (!cgx->lmac_count) return 0; err = cgx_fwi_read_version(&resp, cgx); if (err) return err; major_ver = FIELD_GET(RESP_MAJOR_VER, resp); minor_ver = FIELD_GET(RESP_MINOR_VER, resp); dev_dbg(dev, "Firmware command interface version = %d.%d\n", major_ver, minor_ver); if (major_ver != CGX_FIRMWARE_MAJOR_VER) return -EIO; else return 0; } static void cgx_lmac_linkup_work(struct work_struct *work) { struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work); struct device *dev = &cgx->pdev->dev; int i, err; /* Do Link up for all the enabled lmacs */ for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) { err = cgx_fwi_link_change(cgx, i, true); if (err) dev_info(dev, "cgx port %d:%d Link up command failed\n", cgx->cgx_id, i); } } int cgx_lmac_linkup_start(void *cgxd) { struct cgx *cgx = cgxd; if (!cgx) return -ENODEV; queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work); return 0; } int cgx_lmac_reset(void *cgxd, int lmac_id, u8 pf_req_flr) { struct cgx *cgx = cgxd; u64 cfg; if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; /* Resetting PFC related CSRs */ cfg = 0xff; cgx_write(cgxd, lmac_id, CGXX_CMRX_RX_LOGL_XON, cfg); if (pf_req_flr) cgx_lmac_internal_loopback(cgxd, lmac_id, false); return 0; } static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac, int cnt, bool req_free) { struct mac_ops *mac_ops = cgx->mac_ops; u64 offset, ena_bit; unsigned int irq; int err; irq = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi + cnt * mac_ops->irq_offset); offset = mac_ops->int_set_reg; ena_bit = mac_ops->int_ena_bit; if (req_free) { free_irq(irq, lmac); return 0; } err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac); if (err) return err; /* Enable interrupt */ cgx_write(cgx, lmac->lmac_id, offset, ena_bit); return 0; } int cgx_get_nr_lmacs(void *cgxd) { struct cgx *cgx = cgxd; return cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7ULL; } u8 cgx_get_lmacid(void *cgxd, u8 lmac_index) { struct cgx *cgx = cgxd; return cgx->lmac_idmap[lmac_index]->lmac_id; } unsigned long cgx_get_lmac_bmap(void *cgxd) { struct cgx *cgx = cgxd; return cgx->lmac_bmap; } static int cgx_lmac_init(struct cgx *cgx) { struct lmac *lmac; u64 lmac_list; int i, err; /* lmac_list specifies which lmacs are enabled * when bit n is set to 1, LMAC[n] is enabled */ if (cgx->mac_ops->non_contiguous_serdes_lane) { if (is_dev_rpm2(cgx)) lmac_list = cgx_read(cgx, 0, RPM2_CMRX_RX_LMACS) & 0xFFULL; else lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL; } if (cgx->lmac_count > cgx->max_lmac_per_mac) cgx->lmac_count = cgx->max_lmac_per_mac; for (i = 0; i < cgx->lmac_count; i++) { lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL); if (!lmac) return -ENOMEM; lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL); if (!lmac->name) { err = -ENOMEM; goto err_lmac_free; } sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i); if (cgx->mac_ops->non_contiguous_serdes_lane) { lmac->lmac_id = __ffs64(lmac_list); lmac_list &= ~BIT_ULL(lmac->lmac_id); } else { lmac->lmac_id = i; } lmac->cgx = cgx; lmac->mac_to_index_bmap.max = cgx->mac_ops->dmac_filter_count / cgx->lmac_count; err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap); if (err) goto err_name_free; /* Reserve first entry for default MAC address */ set_bit(0, lmac->mac_to_index_bmap.bmap); lmac->rx_fc_pfvf_bmap.max = 128; err = rvu_alloc_bitmap(&lmac->rx_fc_pfvf_bmap); if (err) goto err_dmac_bmap_free; lmac->tx_fc_pfvf_bmap.max = 128; 
err = rvu_alloc_bitmap(&lmac->tx_fc_pfvf_bmap); if (err) goto err_rx_fc_bmap_free; init_waitqueue_head(&lmac->wq_cmd_cmplt); mutex_init(&lmac->cmd_lock); spin_lock_init(&lmac->event_cb_lock); err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false); if (err) goto err_bitmap_free; /* Add reference */ cgx->lmac_idmap[lmac->lmac_id] = lmac; set_bit(lmac->lmac_id, &cgx->lmac_bmap); cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true); lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id); } return cgx_lmac_verify_fwi_version(cgx); err_bitmap_free: rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap); err_rx_fc_bmap_free: rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap); err_dmac_bmap_free: rvu_free_bitmap(&lmac->mac_to_index_bmap); err_name_free: kfree(lmac->name); err_lmac_free: kfree(lmac); return err; } static int cgx_lmac_exit(struct cgx *cgx) { struct lmac *lmac; int i; if (cgx->cgx_cmd_workq) { destroy_workqueue(cgx->cgx_cmd_workq); cgx->cgx_cmd_workq = NULL; } /* Free all lmac related resources */ for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) { lmac = cgx->lmac_idmap[i]; if (!lmac) continue; cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false); cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true); kfree(lmac->mac_to_index_bmap.bmap); kfree(lmac->name); kfree(lmac); } return 0; } static void cgx_populate_features(struct cgx *cgx) { u64 cfg; cfg = cgx_read(cgx, 0, CGX_CONST); cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg); cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg); if (is_dev_rpm(cgx)) cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM | RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP); else cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_HIGIG2 | RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF); } static u8 cgx_get_rxid_mapoffset(struct cgx *cgx) { if (cgx->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10KB_RPM || is_dev_rpm2(cgx)) return 0x80; else return 0x60; } static struct mac_ops cgx_mac_ops = { .name = "cgx", .csr_offset = 0, .lmac_offset = 18, .int_register = CGXX_CMRX_INT, .int_set_reg = CGXX_CMRX_INT_ENA_W1S, .irq_offset = 9, .int_ena_bit = FW_CGX_INT, .lmac_fwi = CGX_LMAC_FWI, .non_contiguous_serdes_lane = false, .rx_stats_cnt = 9, .tx_stats_cnt = 18, .dmac_filter_count = 32, .get_nr_lmacs = cgx_get_nr_lmacs, .get_lmac_type = cgx_get_lmac_type, .lmac_fifo_len = cgx_get_lmac_fifo_len, .mac_lmac_intl_lbk = cgx_lmac_internal_loopback, .mac_get_rx_stats = cgx_get_rx_stats, .mac_get_tx_stats = cgx_get_tx_stats, .get_fec_stats = cgx_get_fec_stats, .mac_enadis_rx_pause_fwding = cgx_lmac_enadis_rx_pause_fwding, .mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status, .mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm, .mac_pause_frm_config = cgx_lmac_pause_frm_config, .mac_enadis_ptp_config = cgx_lmac_ptp_config, .mac_rx_tx_enable = cgx_lmac_rx_tx_enable, .mac_tx_enable = cgx_lmac_tx_enable, .pfc_config = cgx_lmac_pfc_config, .mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg, .mac_reset = cgx_lmac_reset, }; static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct device *dev = &pdev->dev; struct cgx *cgx; int err, nvec; cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL); if (!cgx) return -ENOMEM; cgx->pdev = pdev; pci_set_drvdata(pdev, cgx); /* Use mac_ops to get MAC specific features */ if (is_dev_rpm(cgx)) cgx->mac_ops = rpm_get_mac_ops(cgx); else cgx->mac_ops = &cgx_mac_ops; cgx->mac_ops->rxid_map_offset = cgx_get_rxid_mapoffset(cgx); err = pci_enable_device(pdev); if (err) { dev_err(dev, "Failed to 
enable PCI device\n"); pci_set_drvdata(pdev, NULL); return err; } err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_err(dev, "PCI request regions failed 0x%x\n", err); goto err_disable_device; } /* MAP configuration registers */ cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); if (!cgx->reg_base) { dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n"); err = -ENOMEM; goto err_release_regions; } cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx); if (!cgx->lmac_count) { dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id); err = -EOPNOTSUPP; goto err_release_regions; } nvec = pci_msix_vec_count(cgx->pdev); err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX); if (err < 0 || err != nvec) { dev_err(dev, "Request for %d msix vectors failed, err %d\n", nvec, err); goto err_release_regions; } cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & CGX_ID_MASK; /* init wq for processing linkup requests */ INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work); cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0); if (!cgx->cgx_cmd_workq) { dev_err(dev, "alloc workqueue failed for cgx cmd"); err = -ENOMEM; goto err_free_irq_vectors; } list_add(&cgx->cgx_list, &cgx_list); cgx_populate_features(cgx); mutex_init(&cgx->lock); err = cgx_lmac_init(cgx); if (err) goto err_release_lmac; return 0; err_release_lmac: cgx_lmac_exit(cgx); list_del(&cgx->cgx_list); err_free_irq_vectors: pci_free_irq_vectors(pdev); err_release_regions: pci_release_regions(pdev); err_disable_device: pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); return err; } static void cgx_remove(struct pci_dev *pdev) { struct cgx *cgx = pci_get_drvdata(pdev); if (cgx) { cgx_lmac_exit(cgx); list_del(&cgx->cgx_list); } pci_free_irq_vectors(pdev); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } struct pci_driver cgx_driver = { .name = DRV_NAME, .id_table = cgx_id_table, .probe = cgx_probe, .remove = cgx_remove, };
linux-master
drivers/net/ethernet/marvell/octeontx2/af/cgx.c
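The cgx.c row above builds every firmware mailbox request by packing a command ID and its parameters into a single 64-bit word with FIELD_SET() and decoding the response with FIELD_GET() (see cgx_set_link_mode(), cgx_set_fec(), cgx_fwi_link_change()). A minimal standalone sketch of that pack/unpack pattern is shown below; it is ordinary userspace C, the EX_* field positions are hypothetical illustrations rather than the real CGX command layout, and it assumes a GCC/Clang-style __builtin_ctzll().

/*
 * Standalone sketch (not kernel code) of the 64-bit command-word
 * pack/unpack pattern used by the CGX firmware interface above.
 * Field positions are hypothetical, not the real CGX layout.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* Hypothetical fields of a request word */
#define EX_CMDREG_ID	EX_GENMASK_ULL(6, 2)	/* command opcode     */
#define EX_CMD_TIMEOUT	EX_GENMASK_ULL(25, 14)	/* poll timeout in ms */

/* Shift a value into a field, analogous to FIELD_SET(mask, val, reg) */
static uint64_t ex_field_set(uint64_t mask, uint64_t val, uint64_t reg)
{
	return (reg & ~mask) | ((val << __builtin_ctzll(mask)) & mask);
}

/* Extract a field, analogous to FIELD_GET(mask, reg) */
static uint64_t ex_field_get(uint64_t mask, uint64_t reg)
{
	return (reg & mask) >> __builtin_ctzll(mask);
}

int main(void)
{
	uint64_t req = 0;

	req = ex_field_set(EX_CMDREG_ID, 0x4, req);	/* e.g. a "link up" id */
	req = ex_field_set(EX_CMD_TIMEOUT, 1000, req);	/* 1000 ms poll window */

	printf("req word       : 0x%016llx\n", (unsigned long long)req);
	printf("decoded opcode : %llu\n",
	       (unsigned long long)ex_field_get(EX_CMDREG_ID, req));
	printf("decoded timeout: %llu ms\n",
	       (unsigned long long)ex_field_get(EX_CMD_TIMEOUT, req));
	return 0;
}

Compiled with cc -std=c99, this prints the packed word and the two fields recovered from it, mirroring how the driver composes a request and then pulls status fields back out of the firmware's response word.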
// SPDX-License-Identifier: GPL-2.0 /* Marvell MCS driver * * Copyright (C) 2022 Marvell. */ #include <linux/bitfield.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/module.h> #include <linux/pci.h> #include "mcs.h" #include "mcs_reg.h" #define DRV_NAME "Marvell MCS Driver" #define PCI_CFG_REG_BAR_NUM 0 static const struct pci_device_id mcs_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_MCS) }, { 0, } /* end of table */ }; static LIST_HEAD(mcs_list); void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id) { u64 reg; reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(id); stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(id); stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(id); stats->ctl_octet_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(id); stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(id); stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(id); stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(id); stats->unctl_octet_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(id); stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(id); stats->octet_encrypted_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(id); stats->octet_protected_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(id); stats->pkt_noactivesa_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(id); stats->pkt_toolong_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(id); stats->pkt_untagged_cnt = mcs_reg_read(mcs, reg); } void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id) { u64 reg; reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(id); stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(id); stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(id); stats->ctl_octet_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(id); stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(id); stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(id); stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(id); stats->unctl_octet_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(id); stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(id); stats->octet_decrypted_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(id); stats->octet_validated_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(id); stats->pkt_port_disabled_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(id); stats->pkt_badtag_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(id); stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(id); stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id); stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, 
reg); reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(id); stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id); stats->pkt_ctl_cnt = mcs_reg_read(mcs, reg); if (mcs->hw->mcs_blks > 1) { reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(id); stats->pkt_notag_cnt = mcs_reg_read(mcs, reg); } } void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats, int id, int dir) { u64 reg; if (dir == MCS_RX) reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(id); else reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(id); stats->tcam_hit_cnt = mcs_reg_read(mcs, reg); } void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats, int id, int dir) { u64 reg; if (dir == MCS_RX) { reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(id); stats->tcam_miss_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(id); stats->parser_err_cnt = mcs_reg_read(mcs, reg); if (mcs->hw->mcs_blks > 1) { reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(id); stats->preempt_err_cnt = mcs_reg_read(mcs, reg); } } else { reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(id); stats->tcam_miss_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(id); stats->parser_err_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(id); stats->sectag_insert_err_cnt = mcs_reg_read(mcs, reg); } } void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir) { u64 reg; if (dir == MCS_RX) { reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(id); stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(id); stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(id); stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(id); stats->pkt_ok_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(id); stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg); } else { reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(id); stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(id); stats->pkt_protected_cnt = mcs_reg_read(mcs, reg); } } void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, int id, int dir) { u64 reg; if (dir == MCS_RX) { reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(id); stats->hit_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(id); stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(id); stats->pkt_late_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id); stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(id); stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg); if (mcs->hw->mcs_blks > 1) { reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(id); stats->pkt_delay_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(id); stats->pkt_ok_cnt = mcs_reg_read(mcs, reg); } if (mcs->hw->mcs_blks == 1) { reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(id); stats->octet_decrypt_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(id); stats->octet_validate_cnt = mcs_reg_read(mcs, reg); } } else { reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(id); stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(id); stats->pkt_protected_cnt = mcs_reg_read(mcs, reg); if (mcs->hw->mcs_blks == 1) { 
reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(id); stats->octet_encrypt_cnt = mcs_reg_read(mcs, reg); reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(id); stats->octet_protected_cnt = mcs_reg_read(mcs, reg); } } } void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir) { struct mcs_flowid_stats flowid_st; struct mcs_port_stats port_st; struct mcs_secy_stats secy_st; struct mcs_sc_stats sc_st; struct mcs_sa_stats sa_st; u64 reg; if (dir == MCS_RX) reg = MCSX_CSE_RX_SLAVE_CTRL; else reg = MCSX_CSE_TX_SLAVE_CTRL; mcs_reg_write(mcs, reg, BIT_ULL(0)); switch (type) { case MCS_FLOWID_STATS: mcs_get_flowid_stats(mcs, &flowid_st, id, dir); break; case MCS_SECY_STATS: if (dir == MCS_RX) mcs_get_rx_secy_stats(mcs, &secy_st, id); else mcs_get_tx_secy_stats(mcs, &secy_st, id); break; case MCS_SC_STATS: mcs_get_sc_stats(mcs, &sc_st, id, dir); break; case MCS_SA_STATS: mcs_get_sa_stats(mcs, &sa_st, id, dir); break; case MCS_PORT_STATS: mcs_get_port_stats(mcs, &port_st, id, dir); break; } mcs_reg_write(mcs, reg, 0x0); } int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir) { struct mcs_rsrc_map *map; int id; if (dir == MCS_RX) map = &mcs->rx; else map = &mcs->tx; /* Clear FLOWID stats */ for (id = 0; id < map->flow_ids.max; id++) { if (map->flowid2pf_map[id] != pcifunc) continue; mcs_clear_stats(mcs, MCS_FLOWID_STATS, id, dir); } /* Clear SECY stats */ for (id = 0; id < map->secy.max; id++) { if (map->secy2pf_map[id] != pcifunc) continue; mcs_clear_stats(mcs, MCS_SECY_STATS, id, dir); } /* Clear SC stats */ for (id = 0; id < map->secy.max; id++) { if (map->sc2pf_map[id] != pcifunc) continue; mcs_clear_stats(mcs, MCS_SC_STATS, id, dir); } /* Clear SA stats */ for (id = 0; id < map->sa.max; id++) { if (map->sa2pf_map[id] != pcifunc) continue; mcs_clear_stats(mcs, MCS_SA_STATS, id, dir); } return 0; } void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir) { u64 reg; if (dir == MCS_RX) reg = MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(pn_id); else reg = MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(pn_id); mcs_reg_write(mcs, reg, next_pn); } void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map) { u64 reg, val; val = (map->sa_index0 & 0xFF) | (map->sa_index1 & 0xFF) << 9 | (map->rekey_ena & 0x1) << 18 | (map->sa_index0_vld & 0x1) << 19 | (map->sa_index1_vld & 0x1) << 20 | (map->tx_sa_active & 0x1) << 21 | map->sectag_sci << 22; reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id); mcs_reg_write(mcs, reg, val); val = map->sectag_sci >> 42; reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(map->sc_id); mcs_reg_write(mcs, reg, val); } void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map) { u64 val, reg; val = (map->sa_index & 0xFF) | map->sa_in_use << 9; reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an); mcs_reg_write(mcs, reg, val); } void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa_id, int dir) { int reg_id; u64 reg; if (dir == MCS_RX) { for (reg_id = 0; reg_id < 8; reg_id++) { reg = MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id); mcs_reg_write(mcs, reg, plcy[reg_id]); } } else { for (reg_id = 0; reg_id < 9; reg_id++) { reg = MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id); mcs_reg_write(mcs, reg, plcy[reg_id]); } } } void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int sc_id, int ena) { u64 reg, val; reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(0); if (sc_id > 63) reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(1); if (ena) val = mcs_reg_read(mcs, reg) | BIT_ULL(sc_id); else val = mcs_reg_read(mcs, reg) & ~BIT_ULL(sc_id); mcs_reg_write(mcs, reg, val); } 
void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id) { mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(0, sc_id), sci); mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(1, sc_id), secy); /* Enable SC CAM */ mcs_ena_dis_sc_cam_entry(mcs, sc_id, true); } void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int secy_id, int dir) { u64 reg; if (dir == MCS_RX) reg = MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(secy_id); else reg = MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(secy_id); mcs_reg_write(mcs, reg, plcy); if (mcs->hw->mcs_blks == 1 && dir == MCS_RX) mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(secy_id), 0x0ull); } void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir) { u64 reg, val; val = (map->secy & 0x7F) | (map->ctrl_pkt & 0x1) << 8; if (dir == MCS_RX) { reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id); } else { val |= (map->sc & 0x7F) << 9; reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id); } mcs_reg_write(mcs, reg, val); } void mcs_ena_dis_flowid_entry(struct mcs *mcs, int flow_id, int dir, int ena) { u64 reg, val; if (dir == MCS_RX) { reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0; if (flow_id > 63) reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1; } else { reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0; if (flow_id > 63) reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1; } /* Enable/Disable the tcam entry */ if (ena) val = mcs_reg_read(mcs, reg) | BIT_ULL(flow_id); else val = mcs_reg_read(mcs, reg) & ~BIT_ULL(flow_id); mcs_reg_write(mcs, reg, val); } void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id, int dir) { int reg_id; u64 reg; if (dir == MCS_RX) { for (reg_id = 0; reg_id < 4; reg_id++) { reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id); mcs_reg_write(mcs, reg, data[reg_id]); } for (reg_id = 0; reg_id < 4; reg_id++) { reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id); mcs_reg_write(mcs, reg, mask[reg_id]); } } else { for (reg_id = 0; reg_id < 4; reg_id++) { reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id); mcs_reg_write(mcs, reg, data[reg_id]); } for (reg_id = 0; reg_id < 4; reg_id++) { reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id); mcs_reg_write(mcs, reg, mask[reg_id]); } } } int mcs_install_flowid_bypass_entry(struct mcs *mcs) { int flow_id, secy_id, reg_id; struct secy_mem_map map; u64 reg, plcy = 0; /* Flow entry */ flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT; __set_bit(flow_id, mcs->rx.flow_ids.bmap); __set_bit(flow_id, mcs->tx.flow_ids.bmap); for (reg_id = 0; reg_id < 4; reg_id++) { reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id); mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0)); } for (reg_id = 0; reg_id < 4; reg_id++) { reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id); mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0)); } /* secy */ secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT; __set_bit(secy_id, mcs->rx.secy.bmap); __set_bit(secy_id, mcs->tx.secy.bmap); /* Set validate frames to NULL and enable control port */ plcy = 0x7ull; if (mcs->hw->mcs_blks > 1) plcy = BIT_ULL(0) | 0x3ull << 4; mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_RX); /* Enable control port and set mtu to max */ plcy = BIT_ULL(0) | GENMASK_ULL(43, 28); if (mcs->hw->mcs_blks > 1) plcy = BIT_ULL(0) | GENMASK_ULL(63, 48); mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_TX); /* Map flowid to secy */ map.secy = secy_id; map.ctrl_pkt = 0; map.flow_id = flow_id; mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_RX); map.sc = secy_id; mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_TX); /* Enable Flowid entry 
*/ mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true); mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true); return 0; } void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir) { struct mcs_rsrc_map *map; int flow_id; if (dir == MCS_RX) map = &mcs->rx; else map = &mcs->tx; /* Clear secy memory to zero */ mcs_secy_plcy_write(mcs, 0, secy_id, dir); /* Disable the tcam entry using this secy */ for (flow_id = 0; flow_id < map->flow_ids.max; flow_id++) { if (map->flowid2secy_map[flow_id] != secy_id) continue; mcs_ena_dis_flowid_entry(mcs, flow_id, dir, false); } } int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc) { int rsrc_id; if (!rsrc->bmap) return -EINVAL; rsrc_id = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, offset, 1, 0); if (rsrc_id >= rsrc->max) return -ENOSPC; bitmap_set(rsrc->bmap, rsrc_id, 1); pf_map[rsrc_id] = pcifunc; return rsrc_id; } int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req) { u16 pcifunc = req->hdr.pcifunc; struct mcs_rsrc_map *map; u64 dis, reg; int id, rc; reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE; map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx; if (req->all) { for (id = 0; id < map->ctrlpktrule.max; id++) { if (map->ctrlpktrule2pf_map[id] != pcifunc) continue; mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, id, pcifunc); dis = mcs_reg_read(mcs, reg); dis &= ~BIT_ULL(id); mcs_reg_write(mcs, reg, dis); } return 0; } rc = mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, req->rule_idx, pcifunc); dis = mcs_reg_read(mcs, reg); dis &= ~BIT_ULL(req->rule_idx); mcs_reg_write(mcs, reg, dis); return rc; } int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req) { u64 reg, enb; u64 idx; switch (req->rule_type) { case MCS_CTRL_PKT_RULE_TYPE_ETH: req->data0 &= GENMASK(15, 0); if (req->data0 != ETH_P_PAE) return -EINVAL; idx = req->rule_idx - MCS_CTRLPKT_ETYPE_RULE_OFFSET; reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(idx) : MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(idx); mcs_reg_write(mcs, reg, req->data0); break; case MCS_CTRL_PKT_RULE_TYPE_DA: if (!(req->data0 & BIT_ULL(40))) return -EINVAL; idx = req->rule_idx - MCS_CTRLPKT_DA_RULE_OFFSET; reg = (req->dir == MCS_RX) ? 
MCSX_PEX_RX_SLAVE_RULE_DAX(idx) : MCSX_PEX_TX_SLAVE_RULE_DAX(idx); mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0)); break; case MCS_CTRL_PKT_RULE_TYPE_RANGE: if (!(req->data0 & BIT_ULL(40)) || !(req->data1 & BIT_ULL(40))) return -EINVAL; idx = req->rule_idx - MCS_CTRLPKT_DA_RANGE_RULE_OFFSET; if (req->dir == MCS_RX) { reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(idx); mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0)); reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(idx); mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0)); } else { reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(idx); mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0)); reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(idx); mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0)); } break; case MCS_CTRL_PKT_RULE_TYPE_COMBO: req->data2 &= GENMASK(15, 0); if (req->data2 != ETH_P_PAE || !(req->data0 & BIT_ULL(40)) || !(req->data1 & BIT_ULL(40))) return -EINVAL; idx = req->rule_idx - MCS_CTRLPKT_COMBO_RULE_OFFSET; if (req->dir == MCS_RX) { reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(idx); mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0)); reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(idx); mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0)); reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(idx); mcs_reg_write(mcs, reg, req->data2); } else { reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(idx); mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0)); reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(idx); mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0)); reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(idx); mcs_reg_write(mcs, reg, req->data2); } break; case MCS_CTRL_PKT_RULE_TYPE_MAC: if (!(req->data0 & BIT_ULL(40))) return -EINVAL; idx = req->rule_idx - MCS_CTRLPKT_MAC_EN_RULE_OFFSET; reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_MAC : MCSX_PEX_TX_SLAVE_RULE_MAC; mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0)); break; } reg = (req->dir == MCS_RX) ? 
MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE; enb = mcs_reg_read(mcs, reg); enb |= BIT_ULL(req->rule_idx); mcs_reg_write(mcs, reg, enb); return 0; } int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc) { /* Check if the rsrc_id is mapped to PF/VF */ if (pf_map[rsrc_id] != pcifunc) return -EINVAL; rvu_free_rsrc(rsrc, rsrc_id); pf_map[rsrc_id] = 0; return 0; } /* Free all the cam resources mapped to pf */ int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc) { struct mcs_rsrc_map *map; int id; if (dir == MCS_RX) map = &mcs->rx; else map = &mcs->tx; /* free tcam entries */ for (id = 0; id < map->flow_ids.max; id++) { if (map->flowid2pf_map[id] != pcifunc) continue; mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map, id, pcifunc); mcs_ena_dis_flowid_entry(mcs, id, dir, false); } /* free secy entries */ for (id = 0; id < map->secy.max; id++) { if (map->secy2pf_map[id] != pcifunc) continue; mcs_free_rsrc(&map->secy, map->secy2pf_map, id, pcifunc); mcs_clear_secy_plcy(mcs, id, dir); } /* free sc entries */ for (id = 0; id < map->secy.max; id++) { if (map->sc2pf_map[id] != pcifunc) continue; mcs_free_rsrc(&map->sc, map->sc2pf_map, id, pcifunc); /* Disable SC CAM only on RX side */ if (dir == MCS_RX) mcs_ena_dis_sc_cam_entry(mcs, id, false); } /* free sa entries */ for (id = 0; id < map->sa.max; id++) { if (map->sa2pf_map[id] != pcifunc) continue; mcs_free_rsrc(&map->sa, map->sa2pf_map, id, pcifunc); } return 0; } int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc) { int rsrc_id; rsrc_id = rvu_alloc_rsrc(rsrc); if (rsrc_id < 0) return -ENOMEM; pf_map[rsrc_id] = pcifunc; return rsrc_id; } int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flow_id, u8 *secy_id, u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir) { struct mcs_rsrc_map *map; int id; if (dir == MCS_RX) map = &mcs->rx; else map = &mcs->tx; id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc); if (id < 0) return -ENOMEM; *flow_id = id; id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc); if (id < 0) return -ENOMEM; *secy_id = id; id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc); if (id < 0) return -ENOMEM; *sc_id = id; id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc); if (id < 0) return -ENOMEM; *sa1_id = id; id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc); if (id < 0) return -ENOMEM; *sa2_id = id; return 0; } static void cn10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs) { struct mcs_intr_event event = { 0 }; struct rsrc_bmap *sc_bmap; u64 val; int sc; sc_bmap = &mcs->tx.sc; event.mcs_id = mcs->mcs_id; event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT; for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) { val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc)); if (mcs->tx_sa_active[sc]) /* SA_index1 was used and got expired */ event.sa_id = (val >> 9) & 0xFF; else /* SA_index0 was used and got expired */ event.sa_id = val & 0xFF; event.pcifunc = mcs->tx.sa2pf_map[event.sa_id]; mcs_add_intr_wq_entry(mcs, &event); } } static void cn10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs) { struct mcs_intr_event event = { 0 }; struct rsrc_bmap *sc_bmap; u64 val, status; int sc; sc_bmap = &mcs->tx.sc; event.mcs_id = mcs->mcs_id; event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT; /* TX SA interrupt is raised only if autorekey is enabled. * MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit gets toggled if * one of two SAs mapped to SC gets expired. 
If tx_sa_active=0 implies * SA in SA_index1 got expired else SA in SA_index0 got expired. */ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) { val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc)); /* Auto rekey is enable */ if (!((val >> 18) & 0x1)) continue; status = (val >> 21) & 0x1; /* Check if tx_sa_active status had changed */ if (status == mcs->tx_sa_active[sc]) continue; /* SA_index0 is expired */ if (status) event.sa_id = val & 0xFF; else event.sa_id = (val >> 9) & 0xFF; event.pcifunc = mcs->tx.sa2pf_map[event.sa_id]; mcs_add_intr_wq_entry(mcs, &event); } } static void mcs_rx_pn_thresh_reached_handler(struct mcs *mcs) { struct mcs_intr_event event = { 0 }; int sa, reg; u64 intr; /* Check expired SAs */ for (reg = 0; reg < (mcs->hw->sa_entries / 64); reg++) { /* Bit high in *PN_THRESH_REACHEDX implies * corresponding SAs are expired. */ intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(reg)); for (sa = 0; sa < 64; sa++) { if (!(intr & BIT_ULL(sa))) continue; event.mcs_id = mcs->mcs_id; event.intr_mask = MCS_CPM_RX_PN_THRESH_REACHED_INT; event.sa_id = sa + (reg * 64); event.pcifunc = mcs->rx.sa2pf_map[event.sa_id]; mcs_add_intr_wq_entry(mcs, &event); } } } static void mcs_rx_misc_intr_handler(struct mcs *mcs, u64 intr) { struct mcs_intr_event event = { 0 }; event.mcs_id = mcs->mcs_id; event.pcifunc = mcs->pf_map[0]; if (intr & MCS_CPM_RX_INT_SECTAG_V_EQ1) event.intr_mask = MCS_CPM_RX_SECTAG_V_EQ1_INT; if (intr & MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1) event.intr_mask |= MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT; if (intr & MCS_CPM_RX_INT_SL_GTE48) event.intr_mask |= MCS_CPM_RX_SECTAG_SL_GTE48_INT; if (intr & MCS_CPM_RX_INT_ES_EQ1_SC_EQ1) event.intr_mask |= MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT; if (intr & MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1) event.intr_mask |= MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT; if (intr & MCS_CPM_RX_INT_PACKET_XPN_EQ0) event.intr_mask |= MCS_CPM_RX_PACKET_XPN_EQ0_INT; mcs_add_intr_wq_entry(mcs, &event); } static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr) { struct mcs_intr_event event = { 0 }; if (!(intr & MCS_CPM_TX_INT_SA_NOT_VALID)) return; event.mcs_id = mcs->mcs_id; event.pcifunc = mcs->pf_map[0]; event.intr_mask = MCS_CPM_TX_SA_NOT_VALID_INT; mcs_add_intr_wq_entry(mcs, &event); } void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir) { u64 val, reg; int lmac; if (!(intr & 0x6ULL)) return; if (intr & BIT_ULL(1)) reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0 : MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0; else reg = (dir == MCS_RX) ? 
MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0 : MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0; val = mcs_reg_read(mcs, reg); /* policy/data over flow occurred */ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) { if (!(val & BIT_ULL(lmac))) continue; dev_warn(mcs->dev, "BEE:Policy or data overflow occurred on lmac:%d\n", lmac); } } void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir) { int lmac; if (!(intr & 0xFFFFFULL)) return; for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) { if (intr & BIT_ULL(lmac)) dev_warn(mcs->dev, "PAB: overflow occurred on lmac:%d\n", lmac); } } static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq) { struct mcs *mcs = (struct mcs *)mcs_irq; u64 intr, cpm_intr, bbe_intr, pab_intr; /* Disable the interrupt */ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0)); /* Check which block has interrupt*/ intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM); /* CPM RX */ if (intr & MCS_CPM_RX_INT_ENA) { /* Check for PN thresh interrupt bit */ cpm_intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_RX_INT); if (cpm_intr & MCS_CPM_RX_INT_PN_THRESH_REACHED) mcs_rx_pn_thresh_reached_handler(mcs); if (cpm_intr & MCS_CPM_RX_INT_ALL) mcs_rx_misc_intr_handler(mcs, cpm_intr); /* Clear the interrupt */ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT, cpm_intr); } /* CPM TX */ if (intr & MCS_CPM_TX_INT_ENA) { cpm_intr = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_INT); if (cpm_intr & MCS_CPM_TX_INT_PN_THRESH_REACHED) { if (mcs->hw->mcs_blks > 1) cnf10kb_mcs_tx_pn_thresh_reached_handler(mcs); else cn10kb_mcs_tx_pn_thresh_reached_handler(mcs); } if (cpm_intr & MCS_CPM_TX_INT_SA_NOT_VALID) mcs_tx_misc_intr_handler(mcs, cpm_intr); if (cpm_intr & MCS_CPM_TX_INT_PACKET_XPN_EQ0) { if (mcs->hw->mcs_blks > 1) cnf10kb_mcs_tx_pn_wrapped_handler(mcs); else cn10kb_mcs_tx_pn_wrapped_handler(mcs); } /* Clear the interrupt */ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT, cpm_intr); } /* BBE RX */ if (intr & MCS_BBE_RX_INT_ENA) { bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT); mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX); /* Clear the interrupt */ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0); mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT, bbe_intr); } /* BBE TX */ if (intr & MCS_BBE_TX_INT_ENA) { bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT); mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX); /* Clear the interrupt */ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0); mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT, bbe_intr); } /* PAB RX */ if (intr & MCS_PAB_RX_INT_ENA) { pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT); mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_RX); /* Clear the interrupt */ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0); mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT, pab_intr); } /* PAB TX */ if (intr & MCS_PAB_TX_INT_ENA) { pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT); mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_TX); /* Clear the interrupt */ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0); mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr); } /* Clear and enable the interrupt */ mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0)); mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0)); return IRQ_HANDLED; } static void *alloc_mem(struct mcs *mcs, int n) { return devm_kcalloc(mcs->dev, n, sizeof(u16), GFP_KERNEL); } static int mcs_alloc_struct_mem(struct mcs *mcs, struct mcs_rsrc_map *res) { struct hwinfo *hw = mcs->hw; int err; res->flowid2pf_map = alloc_mem(mcs, 
hw->tcam_entries); if (!res->flowid2pf_map) return -ENOMEM; res->secy2pf_map = alloc_mem(mcs, hw->secy_entries); if (!res->secy2pf_map) return -ENOMEM; res->sc2pf_map = alloc_mem(mcs, hw->sc_entries); if (!res->sc2pf_map) return -ENOMEM; res->sa2pf_map = alloc_mem(mcs, hw->sa_entries); if (!res->sa2pf_map) return -ENOMEM; res->flowid2secy_map = alloc_mem(mcs, hw->tcam_entries); if (!res->flowid2secy_map) return -ENOMEM; res->ctrlpktrule2pf_map = alloc_mem(mcs, MCS_MAX_CTRLPKT_RULES); if (!res->ctrlpktrule2pf_map) return -ENOMEM; res->flow_ids.max = hw->tcam_entries - MCS_RSRC_RSVD_CNT; err = rvu_alloc_bitmap(&res->flow_ids); if (err) return err; res->secy.max = hw->secy_entries - MCS_RSRC_RSVD_CNT; err = rvu_alloc_bitmap(&res->secy); if (err) return err; res->sc.max = hw->sc_entries; err = rvu_alloc_bitmap(&res->sc); if (err) return err; res->sa.max = hw->sa_entries; err = rvu_alloc_bitmap(&res->sa); if (err) return err; res->ctrlpktrule.max = MCS_MAX_CTRLPKT_RULES; err = rvu_alloc_bitmap(&res->ctrlpktrule); if (err) return err; return 0; } static int mcs_register_interrupts(struct mcs *mcs) { int ret = 0; mcs->num_vec = pci_msix_vec_count(mcs->pdev); ret = pci_alloc_irq_vectors(mcs->pdev, mcs->num_vec, mcs->num_vec, PCI_IRQ_MSIX); if (ret < 0) { dev_err(mcs->dev, "MCS Request for %d msix vector failed err:%d\n", mcs->num_vec, ret); return ret; } ret = request_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec), mcs_ip_intr_handler, 0, "MCS_IP", mcs); if (ret) { dev_err(mcs->dev, "MCS IP irq registration failed\n"); goto exit; } /* MCS enable IP interrupts */ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0)); /* Enable CPM Rx/Tx interrupts */ mcs_reg_write(mcs, MCSX_TOP_SLAVE_INT_SUM_ENB, MCS_CPM_RX_INT_ENA | MCS_CPM_TX_INT_ENA | MCS_BBE_RX_INT_ENA | MCS_BBE_TX_INT_ENA | MCS_PAB_RX_INT_ENA | MCS_PAB_TX_INT_ENA); mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL); mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL); mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xFFULL); mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xFFULL); mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xFFFFFULL); mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xFFFFFULL); mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries); if (!mcs->tx_sa_active) { ret = -ENOMEM; goto free_irq; } return ret; free_irq: free_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec), mcs); exit: pci_free_irq_vectors(mcs->pdev); mcs->num_vec = 0; return ret; } int mcs_get_blkcnt(void) { struct mcs *mcs; int idmax = -ENODEV; /* Check MCS block is present in hardware */ if (!pci_dev_present(mcs_id_table)) return 0; list_for_each_entry(mcs, &mcs_list, mcs_list) if (mcs->mcs_id > idmax) idmax = mcs->mcs_id; if (idmax < 0) return 0; return idmax + 1; } struct mcs *mcs_get_pdata(int mcs_id) { struct mcs *mcs_dev; list_for_each_entry(mcs_dev, &mcs_list, mcs_list) { if (mcs_dev->mcs_id == mcs_id) return mcs_dev; } return NULL; } void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req) { u64 val = 0; mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id), req->port_mode & MCS_PORT_MODE_MASK); req->cstm_tag_rel_mode_sel &= 0x3; if (mcs->hw->mcs_blks > 1) { req->fifo_skid &= MCS_PORT_FIFO_SKID_MASK; val = (u32)req->fifo_skid << 0x10; val |= req->fifo_skid; mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id), val); mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id), req->cstm_tag_rel_mode_sel); val = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION); if 
(req->custom_hdr_enb) val |= BIT_ULL(req->port_id); else val &= ~BIT_ULL(req->port_id); mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, val); } else { val = mcs_reg_read(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id)); val |= (req->cstm_tag_rel_mode_sel << 2); mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id), val); } } void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req, struct mcs_port_cfg_get_rsp *rsp) { u64 reg = 0; rsp->port_mode = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id)) & MCS_PORT_MODE_MASK; if (mcs->hw->mcs_blks > 1) { reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id); rsp->fifo_skid = mcs_reg_read(mcs, reg) & MCS_PORT_FIFO_SKID_MASK; reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id); rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) & 0x3; if (mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION) & BIT_ULL(req->port_id)) rsp->custom_hdr_enb = 1; } else { reg = MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id); rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) >> 2; } rsp->port_id = req->port_id; rsp->mcs_id = req->mcs_id; } void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req, struct mcs_custom_tag_cfg_get_rsp *rsp) { u64 reg = 0, val = 0; u8 idx; for (idx = 0; idx < MCS_MAX_CUSTOM_TAGS; idx++) { if (mcs->hw->mcs_blks > 1) reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(idx) : MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(idx); else reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_VLAN_CFGX(idx) : MCSX_PEX_TX_SLAVE_VLAN_CFGX(idx); val = mcs_reg_read(mcs, reg); if (mcs->hw->mcs_blks > 1) { rsp->cstm_etype[idx] = val & GENMASK(15, 0); rsp->cstm_indx[idx] = (val >> 0x16) & 0x3; reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_ETYPE_ENABLE : MCSX_PEX_TX_SLAVE_ETYPE_ENABLE; rsp->cstm_etype_en = mcs_reg_read(mcs, reg) & 0xFF; } else { rsp->cstm_etype[idx] = (val >> 0x1) & GENMASK(15, 0); rsp->cstm_indx[idx] = (val >> 0x11) & 0x3; rsp->cstm_etype_en |= (val & 0x1) << idx; } } rsp->mcs_id = req->mcs_id; rsp->dir = req->dir; } void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset) { u64 reg = MCSX_MCS_TOP_SLAVE_PORT_RESET(port_id); mcs_reg_write(mcs, reg, reset & 0x1); } /* Set lmac to bypass/operational mode */ void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode) { u64 reg; int id = lmac_id * 2; reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(id); mcs_reg_write(mcs, reg, (u64)mode); reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG((id + 1)); mcs_reg_write(mcs, reg, (u64)mode); } void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn) { u64 reg; if (pn->dir == MCS_RX) reg = pn->xpn ? MCSX_CPM_RX_SLAVE_XPN_THRESHOLD : MCSX_CPM_RX_SLAVE_PN_THRESHOLD; else reg = pn->xpn ? 
MCSX_CPM_TX_SLAVE_XPN_THRESHOLD : MCSX_CPM_TX_SLAVE_PN_THRESHOLD; mcs_reg_write(mcs, reg, pn->threshold); } void cn10kb_mcs_parser_cfg(struct mcs *mcs) { u64 reg, val; /* VLAN CTag */ val = BIT_ULL(0) | (0x8100ull & 0xFFFF) << 1 | BIT_ULL(17); /* RX */ reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(0); mcs_reg_write(mcs, reg, val); /* TX */ reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(0); mcs_reg_write(mcs, reg, val); /* VLAN STag */ val = BIT_ULL(0) | (0x88a8ull & 0xFFFF) << 1 | BIT_ULL(18); /* RX */ reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(1); mcs_reg_write(mcs, reg, val); /* TX */ reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(1); mcs_reg_write(mcs, reg, val); } static void mcs_lmac_init(struct mcs *mcs, int lmac_id) { u64 reg; /* Port mode 25GB */ reg = MCSX_PAB_RX_SLAVE_PORT_CFGX(lmac_id); mcs_reg_write(mcs, reg, 0); if (mcs->hw->mcs_blks > 1) { reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(lmac_id); mcs_reg_write(mcs, reg, 0xe000e); return; } reg = MCSX_PAB_TX_SLAVE_PORT_CFGX(lmac_id); mcs_reg_write(mcs, reg, 0); } int mcs_set_lmac_channels(int mcs_id, u16 base) { struct mcs *mcs; int lmac; u64 cfg; mcs = mcs_get_pdata(mcs_id); if (!mcs) return -ENODEV; for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) { cfg = mcs_reg_read(mcs, MCSX_LINK_LMACX_CFG(lmac)); cfg &= ~(MCSX_LINK_LMAC_BASE_MASK | MCSX_LINK_LMAC_RANGE_MASK); cfg |= FIELD_PREP(MCSX_LINK_LMAC_RANGE_MASK, ilog2(16)); cfg |= FIELD_PREP(MCSX_LINK_LMAC_BASE_MASK, base); mcs_reg_write(mcs, MCSX_LINK_LMACX_CFG(lmac), cfg); base += 16; } return 0; } static int mcs_x2p_calibration(struct mcs *mcs) { unsigned long timeout = jiffies + usecs_to_jiffies(20000); int i, err = 0; u64 val; /* set X2P calibration */ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL); val |= BIT_ULL(5); mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val); /* Wait for calibration to complete */ while (!(mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS) & BIT_ULL(0))) { if (time_before(jiffies, timeout)) { usleep_range(80, 100); continue; } else { err = -EBUSY; dev_err(mcs->dev, "MCS X2P calibration failed..ignoring\n"); return err; } } val = mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS); for (i = 0; i < mcs->hw->mcs_x2p_intf; i++) { if (val & BIT_ULL(1 + i)) continue; err = -EBUSY; dev_err(mcs->dev, "MCS:%d didn't respond to X2P calibration\n", i); } /* Clear X2P calibrate */ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, mcs_reg_read(mcs, MCSX_MIL_GLOBAL) & ~BIT_ULL(5)); return err; } static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass) { u64 val; /* Set MCS to external bypass */ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL); if (bypass) val |= BIT_ULL(6); else val &= ~BIT_ULL(6); mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val); } static void mcs_global_cfg(struct mcs *mcs) { /* Disable external bypass */ mcs_set_external_bypass(mcs, false); /* Reset TX/RX stats memory */ mcs_reg_write(mcs, MCSX_CSE_RX_SLAVE_STATS_CLEAR, 0x1F); mcs_reg_write(mcs, MCSX_CSE_TX_SLAVE_STATS_CLEAR, 0x1F); /* Set MCS to perform standard IEEE802.1AE macsec processing */ if (mcs->hw->mcs_blks == 1) { mcs_reg_write(mcs, MCSX_IP_MODE, BIT_ULL(3)); return; } mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_ENTRY, 0xe4); mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_LEN, 4); } void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs) { struct hwinfo *hw = mcs->hw; hw->tcam_entries = 128; /* TCAM entries */ hw->secy_entries = 128; /* SecY entries */ hw->sc_entries = 128; /* SC CAM entries */ hw->sa_entries = 256; /* SA entries */ hw->lmac_cnt = 20; /* lmacs/ports per mcs block */ hw->mcs_x2p_intf = 5; /* x2p clabration intf */ hw->mcs_blks = 1; /* MCS blocks */ hw->ip_vec = MCS_CN10KB_INT_VEC_IP; /* IP 
vector */ } static struct mcs_ops cn10kb_mcs_ops = { .mcs_set_hw_capabilities = cn10kb_mcs_set_hw_capabilities, .mcs_parser_cfg = cn10kb_mcs_parser_cfg, .mcs_tx_sa_mem_map_write = cn10kb_mcs_tx_sa_mem_map_write, .mcs_rx_sa_mem_map_write = cn10kb_mcs_rx_sa_mem_map_write, .mcs_flowid_secy_map = cn10kb_mcs_flowid_secy_map, .mcs_bbe_intr_handler = cn10kb_mcs_bbe_intr_handler, .mcs_pab_intr_handler = cn10kb_mcs_pab_intr_handler, }; static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct device *dev = &pdev->dev; int lmac, err = 0; struct mcs *mcs; mcs = devm_kzalloc(dev, sizeof(*mcs), GFP_KERNEL); if (!mcs) return -ENOMEM; mcs->hw = devm_kzalloc(dev, sizeof(struct hwinfo), GFP_KERNEL); if (!mcs->hw) return -ENOMEM; err = pci_enable_device(pdev); if (err) { dev_err(dev, "Failed to enable PCI device\n"); pci_set_drvdata(pdev, NULL); return err; } err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_err(dev, "PCI request regions failed 0x%x\n", err); goto exit; } mcs->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); if (!mcs->reg_base) { dev_err(dev, "mcs: Cannot map CSR memory space, aborting\n"); err = -ENOMEM; goto exit; } pci_set_drvdata(pdev, mcs); mcs->pdev = pdev; mcs->dev = &pdev->dev; if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B) mcs->mcs_ops = &cn10kb_mcs_ops; else mcs->mcs_ops = cnf10kb_get_mac_ops(); /* Set hardware capabilities */ mcs->mcs_ops->mcs_set_hw_capabilities(mcs); mcs_global_cfg(mcs); /* Perform X2P calibration */ err = mcs_x2p_calibration(mcs); if (err) goto err_x2p; mcs->mcs_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & MCS_ID_MASK; /* Set mcs tx side resources */ err = mcs_alloc_struct_mem(mcs, &mcs->tx); if (err) goto err_x2p; /* Set mcs rx side resources */ err = mcs_alloc_struct_mem(mcs, &mcs->rx); if (err) goto err_x2p; /* per port config */ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) mcs_lmac_init(mcs, lmac); /* Parser configuration */ mcs->mcs_ops->mcs_parser_cfg(mcs); err = mcs_register_interrupts(mcs); if (err) goto exit; list_add(&mcs->mcs_list, &mcs_list); mutex_init(&mcs->stats_lock); return 0; err_x2p: /* Enable external bypass */ mcs_set_external_bypass(mcs, true); exit: pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); return err; } static void mcs_remove(struct pci_dev *pdev) { struct mcs *mcs = pci_get_drvdata(pdev); /* Set MCS to external bypass */ mcs_set_external_bypass(mcs, true); free_irq(pci_irq_vector(pdev, mcs->hw->ip_vec), mcs); pci_free_irq_vectors(pdev); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } struct pci_driver mcs_driver = { .name = DRV_NAME, .id_table = mcs_id_table, .probe = mcs_probe, .remove = mcs_remove, };
linux-master
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
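The mcs.c row above tracks every hardware table (flow IDs, SecYs, SCs, SAs, control-packet rules) with a bitmap of in-use entries plus a parallel array recording which PF/VF owns each entry; mcs_alloc_rsrc() claims the first free slot and mcs_free_rsrc() releases it only if the caller's pcifunc matches. Below is a small self-contained sketch of that bookkeeping, assuming illustrative names and a 32-entry table rather than the driver's real structures.

/*
 * Standalone sketch (not kernel code) of the bitmap + owner-array
 * bookkeeping used by the MCS resource maps above. Names and sizes
 * are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_MAX_RSRC 32

struct ex_rsrc_map {
	uint32_t bmap;			/* bit n set => entry n is in use */
	uint16_t owner[EX_MAX_RSRC];	/* pcifunc that owns entry n      */
};

/* Claim the first free entry and record its owner; -1 when exhausted */
static int ex_alloc_rsrc(struct ex_rsrc_map *map, uint16_t pcifunc)
{
	for (int id = 0; id < EX_MAX_RSRC; id++) {
		if (!(map->bmap & (1u << id))) {
			map->bmap |= 1u << id;
			map->owner[id] = pcifunc;
			return id;
		}
	}
	return -1;
}

/* Release an entry only if the caller owns it, as mcs_free_rsrc() checks */
static bool ex_free_rsrc(struct ex_rsrc_map *map, int id, uint16_t pcifunc)
{
	if (id < 0 || id >= EX_MAX_RSRC || map->owner[id] != pcifunc)
		return false;
	map->bmap &= ~(1u << id);
	map->owner[id] = 0;
	return true;
}

int main(void)
{
	struct ex_rsrc_map map = { 0 };
	int id = ex_alloc_rsrc(&map, 0x400);	/* hypothetical pcifunc */

	printf("allocated id %d, freed ok: %d\n", id,
	       ex_free_rsrc(&map, id, 0x400));
	return 0;
}

The ownership check on free is the important part of the pattern: it is what lets mcs_free_all_rsrc() and mcs_clear_all_stats() walk the whole table and release only the entries mapped to a given pcifunc.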
// SPDX-License-Identifier: GPL-2.0 /* Marvell CN10K RPM driver * * Copyright (C) 2020 Marvell. * */ #include "cgx.h" #include "lmac_common.h" static struct mac_ops rpm_mac_ops = { .name = "rpm", .csr_offset = 0x4e00, .lmac_offset = 20, .int_register = RPMX_CMRX_SW_INT, .int_set_reg = RPMX_CMRX_SW_INT_ENA_W1S, .irq_offset = 1, .int_ena_bit = BIT_ULL(0), .lmac_fwi = RPM_LMAC_FWI, .non_contiguous_serdes_lane = true, .rx_stats_cnt = 43, .tx_stats_cnt = 34, .dmac_filter_count = 32, .get_nr_lmacs = rpm_get_nr_lmacs, .get_lmac_type = rpm_get_lmac_type, .lmac_fifo_len = rpm_get_lmac_fifo_len, .mac_lmac_intl_lbk = rpm_lmac_internal_loopback, .mac_get_rx_stats = rpm_get_rx_stats, .mac_get_tx_stats = rpm_get_tx_stats, .get_fec_stats = rpm_get_fec_stats, .mac_enadis_rx_pause_fwding = rpm_lmac_enadis_rx_pause_fwding, .mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status, .mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm, .mac_pause_frm_config = rpm_lmac_pause_frm_config, .mac_enadis_ptp_config = rpm_lmac_ptp_config, .mac_rx_tx_enable = rpm_lmac_rx_tx_enable, .mac_tx_enable = rpm_lmac_tx_enable, .pfc_config = rpm_lmac_pfc_config, .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg, .mac_reset = rpm_lmac_reset, }; static struct mac_ops rpm2_mac_ops = { .name = "rpm", .csr_offset = RPM2_CSR_OFFSET, .lmac_offset = 20, .int_register = RPM2_CMRX_SW_INT, .int_set_reg = RPM2_CMRX_SW_INT_ENA_W1S, .irq_offset = 1, .int_ena_bit = BIT_ULL(0), .lmac_fwi = RPM2_LMAC_FWI, .non_contiguous_serdes_lane = true, .rx_stats_cnt = 43, .tx_stats_cnt = 34, .dmac_filter_count = 64, .get_nr_lmacs = rpm2_get_nr_lmacs, .get_lmac_type = rpm_get_lmac_type, .lmac_fifo_len = rpm2_get_lmac_fifo_len, .mac_lmac_intl_lbk = rpm_lmac_internal_loopback, .mac_get_rx_stats = rpm_get_rx_stats, .mac_get_tx_stats = rpm_get_tx_stats, .get_fec_stats = rpm_get_fec_stats, .mac_enadis_rx_pause_fwding = rpm_lmac_enadis_rx_pause_fwding, .mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status, .mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm, .mac_pause_frm_config = rpm_lmac_pause_frm_config, .mac_enadis_ptp_config = rpm_lmac_ptp_config, .mac_rx_tx_enable = rpm_lmac_rx_tx_enable, .mac_tx_enable = rpm_lmac_tx_enable, .pfc_config = rpm_lmac_pfc_config, .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg, .mac_reset = rpm_lmac_reset, }; bool is_dev_rpm2(void *rpmd) { rpm_t *rpm = rpmd; return (rpm->pdev->device == PCI_DEVID_CN10KB_RPM); } struct mac_ops *rpm_get_mac_ops(rpm_t *rpm) { if (is_dev_rpm2(rpm)) return &rpm2_mac_ops; else return &rpm_mac_ops; } static void rpm_write(rpm_t *rpm, u64 lmac, u64 offset, u64 val) { cgx_write(rpm, lmac, offset, val); } static u64 rpm_read(rpm_t *rpm, u64 lmac, u64 offset) { return cgx_read(rpm, lmac, offset); } /* Read HW major version to determine RPM * MAC type 100/USX */ static bool is_mac_rpmusx(void *rpmd) { rpm_t *rpm = rpmd; return rpm_read(rpm, 0, RPMX_CONST1) & 0x700ULL; } int rpm_get_nr_lmacs(void *rpmd) { rpm_t *rpm = rpmd; return hweight8(rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS) & 0xFULL); } int rpm2_get_nr_lmacs(void *rpmd) { rpm_t *rpm = rpmd; return hweight8(rpm_read(rpm, 0, RPM2_CMRX_RX_LMACS) & 0xFFULL); } int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable) { rpm_t *rpm = rpmd; u64 cfg, last; if (!is_lmac_valid(rpm, lmac_id)) return -ENODEV; cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); last = cfg; if (enable) cfg |= RPM_TX_EN; else cfg &= ~(RPM_TX_EN); if (cfg != last) rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); return !!(last & RPM_TX_EN); } int 
rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable) { rpm_t *rpm = rpmd; u64 cfg; if (!is_lmac_valid(rpm, lmac_id)) return -ENODEV; cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); if (enable) cfg |= RPM_RX_EN | RPM_TX_EN; else cfg &= ~(RPM_RX_EN | RPM_TX_EN); rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); return 0; } void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable) { rpm_t *rpm = rpmd; struct lmac *lmac; u64 cfg; if (!rpm) return; lmac = lmac_pdata(lmac_id, rpm); if (!lmac) return; /* Pause frames are not enabled just return */ if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) return; if (enable) { cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE; rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); } else { cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE; rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); } } int rpm_lmac_get_pause_frm_status(void *rpmd, int lmac_id, u8 *tx_pause, u8 *rx_pause) { rpm_t *rpm = rpmd; u64 cfg; if (!is_lmac_valid(rpm, lmac_id)) return -ENODEV; cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); if (!(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE)) { *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE); *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE); } return 0; } static void rpm_cfg_pfc_quanta_thresh(rpm_t *rpm, int lmac_id, unsigned long pfc_en, bool enable) { u64 quanta_offset = 0, quanta_thresh = 0, cfg; int i, shift; /* Set pause time and interval */ for_each_set_bit(i, &pfc_en, 16) { switch (i) { case 0: case 1: quanta_offset = RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA; quanta_thresh = RPMX_MTI_MAC100X_CL01_QUANTA_THRESH; break; case 2: case 3: quanta_offset = RPMX_MTI_MAC100X_CL23_PAUSE_QUANTA; quanta_thresh = RPMX_MTI_MAC100X_CL23_QUANTA_THRESH; break; case 4: case 5: quanta_offset = RPMX_MTI_MAC100X_CL45_PAUSE_QUANTA; quanta_thresh = RPMX_MTI_MAC100X_CL45_QUANTA_THRESH; break; case 6: case 7: quanta_offset = RPMX_MTI_MAC100X_CL67_PAUSE_QUANTA; quanta_thresh = RPMX_MTI_MAC100X_CL67_QUANTA_THRESH; break; case 8: case 9: quanta_offset = RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA; quanta_thresh = RPMX_MTI_MAC100X_CL89_QUANTA_THRESH; break; case 10: case 11: quanta_offset = RPMX_MTI_MAC100X_CL1011_PAUSE_QUANTA; quanta_thresh = RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH; break; case 12: case 13: quanta_offset = RPMX_MTI_MAC100X_CL1213_PAUSE_QUANTA; quanta_thresh = RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH; break; case 14: case 15: quanta_offset = RPMX_MTI_MAC100X_CL1415_PAUSE_QUANTA; quanta_thresh = RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH; break; } if (!quanta_offset || !quanta_thresh) continue; shift = (i % 2) ? 
1 : 0; cfg = rpm_read(rpm, lmac_id, quanta_offset); if (enable) { cfg |= ((u64)RPM_DEFAULT_PAUSE_TIME << shift * 16); } else { if (!shift) cfg &= ~GENMASK_ULL(15, 0); else cfg &= ~GENMASK_ULL(31, 16); } rpm_write(rpm, lmac_id, quanta_offset, cfg); cfg = rpm_read(rpm, lmac_id, quanta_thresh); if (enable) { cfg |= ((u64)(RPM_DEFAULT_PAUSE_TIME / 2) << shift * 16); } else { if (!shift) cfg &= ~GENMASK_ULL(15, 0); else cfg &= ~GENMASK_ULL(31, 16); } rpm_write(rpm, lmac_id, quanta_thresh, cfg); } } static void rpm2_lmac_cfg_bp(rpm_t *rpm, int lmac_id, u8 tx_pause, u8 rx_pause) { u64 cfg; cfg = rpm_read(rpm, lmac_id, RPM2_CMR_RX_OVR_BP); if (tx_pause) { /* Configure CL0 Pause Quanta & threshold * for 802.3X frames */ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true); cfg &= ~RPM2_CMR_RX_OVR_BP_EN; } else { /* Disable all Pause Quanta & threshold values */ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false); cfg |= RPM2_CMR_RX_OVR_BP_EN; cfg &= ~RPM2_CMR_RX_OVR_BP_BP; } rpm_write(rpm, lmac_id, RPM2_CMR_RX_OVR_BP, cfg); } static void rpm_lmac_cfg_bp(rpm_t *rpm, int lmac_id, u8 tx_pause, u8 rx_pause) { u64 cfg; cfg = rpm_read(rpm, 0, RPMX_CMR_RX_OVR_BP); if (tx_pause) { /* Configure CL0 Pause Quanta & threshold for * 802.3X frames */ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true); cfg &= ~RPMX_CMR_RX_OVR_BP_EN(lmac_id); } else { /* Disable all Pause Quanta & threshold values */ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false); cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac_id); cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac_id); } rpm_write(rpm, 0, RPMX_CMR_RX_OVR_BP, cfg); } int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause) { rpm_t *rpm = rpmd; u64 cfg; if (!is_lmac_valid(rpm, lmac_id)) return -ENODEV; cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE; cfg |= rx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE; cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE; cfg |= rx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE; rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE; cfg |= tx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE; rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); if (is_dev_rpm2(rpm)) rpm2_lmac_cfg_bp(rpm, lmac_id, tx_pause, rx_pause); else rpm_lmac_cfg_bp(rpm, lmac_id, tx_pause, rx_pause); return 0; } void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable) { u64 cfg, pfc_class_mask_cfg; rpm_t *rpm = rpmd; /* ALL pause frames received are completely ignored */ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE; rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); /* Disable forward pause to TX block */ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE; rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); /* Disable pause frames transmission */ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE; rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); /* Enable channel mask for all LMACS */ if (is_dev_rpm2(rpm)) rpm_write(rpm, lmac_id, RPM2_CMR_CHAN_MSK_OR, 0xffff); else rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL); /* Disable all PFC classes */ pfc_class_mask_cfg = is_dev_rpm2(rpm) ? 
RPM2_CMRX_PRT_CBFC_CTL : RPMX_CMRX_PRT_CBFC_CTL; cfg = rpm_read(rpm, lmac_id, pfc_class_mask_cfg); cfg = FIELD_SET(RPM_PFC_CLASS_MASK, 0, cfg); rpm_write(rpm, lmac_id, pfc_class_mask_cfg, cfg); } int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat) { rpm_t *rpm = rpmd; u64 val_lo, val_hi; if (!is_lmac_valid(rpm, lmac_id)) return -ENODEV; mutex_lock(&rpm->lock); /* Update idx to point per lmac Rx statistics page */ idx += lmac_id * rpm->mac_ops->rx_stats_cnt; /* Read lower 32 bits of counter */ val_lo = rpm_read(rpm, 0, RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX + (idx * 8)); /* upon read of lower 32 bits, higher 32 bits are written * to RPMX_MTI_STAT_DATA_HI_CDC */ val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC); *rx_stat = (val_hi << 32 | val_lo); mutex_unlock(&rpm->lock); return 0; } int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat) { rpm_t *rpm = rpmd; u64 val_lo, val_hi; if (!is_lmac_valid(rpm, lmac_id)) return -ENODEV; mutex_lock(&rpm->lock); /* Update idx to point per lmac Tx statistics page */ idx += lmac_id * rpm->mac_ops->tx_stats_cnt; val_lo = rpm_read(rpm, 0, RPMX_MTI_STAT_TX_STAT_PAGES_COUNTERX + (idx * 8)); val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC); *tx_stat = (val_hi << 32 | val_lo); mutex_unlock(&rpm->lock); return 0; } u8 rpm_get_lmac_type(void *rpmd, int lmac_id) { rpm_t *rpm = rpmd; u64 req = 0, resp; int err; req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_LINK_STS, req); err = cgx_fwi_cmd_generic(req, &resp, rpm, 0); if (!err) return FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, resp); return err; } u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id) { rpm_t *rpm = rpmd; u64 hi_perf_lmac; u8 num_lmacs; u32 fifo_len; fifo_len = rpm->mac_ops->fifo_len; num_lmacs = rpm->mac_ops->get_nr_lmacs(rpm); switch (num_lmacs) { case 1: return fifo_len; case 2: return fifo_len / 2; case 3: /* LMAC marked as hi_perf gets half of the FIFO and rest 1/4th */ hi_perf_lmac = rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS); hi_perf_lmac = (hi_perf_lmac >> 4) & 0x3ULL; if (lmac_id == hi_perf_lmac) return fifo_len / 2; return fifo_len / 4; case 4: default: return fifo_len / 4; } return 0; } static int rpmusx_lmac_internal_loopback(rpm_t *rpm, int lmac_id, bool enable) { u64 cfg; cfg = rpm_read(rpm, lmac_id, RPM2_USX_PCSX_CONTROL1); if (enable) cfg |= RPM2_USX_PCS_LBK; else cfg &= ~RPM2_USX_PCS_LBK; rpm_write(rpm, lmac_id, RPM2_USX_PCSX_CONTROL1, cfg); return 0; } u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id) { u64 hi_perf_lmac, lmac_info; rpm_t *rpm = rpmd; u8 num_lmacs; u32 fifo_len; lmac_info = rpm_read(rpm, 0, RPM2_CMRX_RX_LMACS); /* LMACs are divided into two groups and each group * gets half of the FIFO * Group0 lmac_id range {0..3} * Group1 lmac_id range {4..7} */ fifo_len = rpm->mac_ops->fifo_len / 2; if (lmac_id < 4) { num_lmacs = hweight8(lmac_info & 0xF); hi_perf_lmac = (lmac_info >> 8) & 0x3ULL; } else { num_lmacs = hweight8(lmac_info & 0xF0); hi_perf_lmac = (lmac_info >> 10) & 0x3ULL; hi_perf_lmac += 4; } switch (num_lmacs) { case 1: return fifo_len; case 2: return fifo_len / 2; case 3: /* LMAC marked as hi_perf gets half of the FIFO * and rest 1/4th */ if (lmac_id == hi_perf_lmac) return fifo_len / 2; return fifo_len / 4; case 4: default: return fifo_len / 4; } return 0; } int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable) { rpm_t *rpm = rpmd; struct lmac *lmac; u64 cfg; if (!is_lmac_valid(rpm, lmac_id)) return -ENODEV; lmac = lmac_pdata(lmac_id, rpm); if (lmac->lmac_type == LMAC_MODE_QSGMII || lmac->lmac_type == LMAC_MODE_SGMII) { 
dev_err(&rpm->pdev->dev, "loopback not supported for LPC mode\n"); return 0; } if (is_dev_rpm2(rpm) && is_mac_rpmusx(rpm)) return rpmusx_lmac_internal_loopback(rpm, lmac_id, enable); cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1); if (enable) cfg |= RPMX_MTI_PCS_LBK; else cfg &= ~RPMX_MTI_PCS_LBK; rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg); return 0; } void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable) { rpm_t *rpm = rpmd; u64 cfg; if (!is_lmac_valid(rpm, lmac_id)) return; cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_CFG); if (enable) { cfg |= RPMX_RX_TS_PREPEND; cfg |= RPMX_TX_PTP_1S_SUPPORT; } else { cfg &= ~RPMX_RX_TS_PREPEND; cfg &= ~RPMX_TX_PTP_1S_SUPPORT; } rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg); cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE); if (enable) { cfg |= RPMX_ONESTEP_ENABLE; cfg &= ~RPMX_TS_BINARY_MODE; } else { cfg &= ~RPMX_ONESTEP_ENABLE; } rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE, cfg); } int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en) { u64 cfg, class_en, pfc_class_mask_cfg; rpm_t *rpm = rpmd; if (!is_lmac_valid(rpm, lmac_id)) return -ENODEV; pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL : RPMX_CMRX_PRT_CBFC_CTL; cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); class_en = rpm_read(rpm, lmac_id, pfc_class_mask_cfg); pfc_en |= FIELD_GET(RPM_PFC_CLASS_MASK, class_en); if (rx_pause) { cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE | RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE | RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD); } else { cfg |= (RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE | RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE | RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD); } if (tx_pause) { rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, pfc_en, true); cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE; class_en = FIELD_SET(RPM_PFC_CLASS_MASK, pfc_en, class_en); } else { rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xfff, false); cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE; class_en = FIELD_SET(RPM_PFC_CLASS_MASK, 0, class_en); } if (!rx_pause && !tx_pause) cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE; else cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE; rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); rpm_write(rpm, lmac_id, pfc_class_mask_cfg, class_en); return 0; } int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause, u8 *rx_pause) { rpm_t *rpm = rpmd; u64 cfg; if (!is_lmac_valid(rpm, lmac_id)) return -ENODEV; cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); if (cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE) { *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE); *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE); } return 0; } int rpm_get_fec_stats(void *rpmd, int lmac_id, struct cgx_fec_stats_rsp *rsp) { u64 val_lo, val_hi; rpm_t *rpm = rpmd; u64 cfg; if (!is_lmac_valid(rpm, lmac_id)) return -ENODEV; if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE) return 0; if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) { val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_CCW_LO); val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI); rsp->fec_corr_blks = (val_hi << 16 | val_lo); val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_NCCW_LO); val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI); rsp->fec_uncorr_blks = (val_hi << 16 | val_lo); /* 50G uses 2 Physical serdes lines */ if (rpm->lmac_idmap[lmac_id]->link_info.lmac_type_id == LMAC_MODE_50G_R) { 
			val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL1_CCW_LO);
			val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI);
			rsp->fec_corr_blks += (val_hi << 16 | val_lo);
			val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL1_NCCW_LO);
			val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI);
			rsp->fec_uncorr_blks += (val_hi << 16 | val_lo);
		}
	} else {
		/* enable RS-FEC capture */
		cfg = rpm_read(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL);
		cfg |= RPMX_RSFEC_RX_CAPTURE | BIT(lmac_id);
		rpm_write(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL, cfg);
		val_lo = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2);
		val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
		rsp->fec_corr_blks = (val_hi << 32 | val_lo);
		val_lo = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3);
		val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
		rsp->fec_uncorr_blks = (val_hi << 32 | val_lo);
	}
	return 0;
}

int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr)
{
	u64 rx_logl_xon, cfg;
	rpm_t *rpm = rpmd;

	if (!is_lmac_valid(rpm, lmac_id))
		return -ENODEV;

	/* Resetting PFC related CSRs */
	rx_logl_xon = is_dev_rpm2(rpm) ? RPM2_CMRX_RX_LOGL_XON : RPMX_CMRX_RX_LOGL_XON;
	cfg = 0xff;
	rpm_write(rpm, lmac_id, rx_logl_xon, cfg);
	if (pf_req_flr)
		rpm_lmac_internal_loopback(rpm, lmac_id, false);
	return 0;
}
linux-master
drivers/net/ethernet/marvell/octeontx2/af/rpm.c
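The FIFO sizing in rpm_get_lmac_fifo_len() and rpm2_get_lmac_fifo_len() above splits the MAC's receive FIFO by the number of active LMACs, with the LMAC marked as high-performance keeping half of the FIFO in the three-LMAC case. The following is a minimal standalone sketch of that arithmetic only; the helper name and the example values are hypothetical and it is not part of the driver API.

#include <stdint.h>
#include <stdio.h>

/* Stand-alone model of the FIFO split used by rpm_get_lmac_fifo_len():
 * 1 LMAC keeps the whole FIFO, 2 LMACs get half each, 3 LMACs give half
 * to the hi-perf LMAC and a quarter to the rest, 4+ LMACs get a quarter each.
 */
static uint32_t lmac_fifo_share(uint32_t fifo_len, uint8_t num_lmacs,
				int lmac_id, int hi_perf_lmac)
{
	switch (num_lmacs) {
	case 1:
		return fifo_len;
	case 2:
		return fifo_len / 2;
	case 3:
		return (lmac_id == hi_perf_lmac) ? fifo_len / 2 : fifo_len / 4;
	default:
		return fifo_len / 4;
	}
}

int main(void)
{
	/* Example: a 64 KB FIFO shared by 3 LMACs, LMAC1 marked hi-perf */
	for (int lmac = 0; lmac < 3; lmac++)
		printf("lmac%d: %u bytes\n", lmac,
		       (unsigned int)lmac_fifo_share(65536, 3, lmac, 1));
	return 0;
}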
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Admin Function driver * * Copyright (C) 2018 Marvell. * */ #include <linux/bitfield.h> #include <linux/module.h> #include <linux/pci.h> #include "rvu_struct.h" #include "rvu_reg.h" #include "rvu.h" static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, struct npa_aq_inst_s *inst) { struct admin_queue *aq = block->aq; struct npa_aq_res_s *result; int timeout = 1000; u64 reg, head; result = (struct npa_aq_res_s *)aq->res->base; /* Get current head pointer where to append this instruction */ reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS); head = (reg >> 4) & AQ_PTR_MASK; memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)), (void *)inst, aq->inst->entry_sz); memset(result, 0, sizeof(*result)); /* sync into memory */ wmb(); /* Ring the doorbell and wait for result */ rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1); while (result->compcode == NPA_AQ_COMP_NOTDONE) { cpu_relax(); udelay(1); timeout--; if (!timeout) return -EBUSY; } if (result->compcode != NPA_AQ_COMP_GOOD) { /* TODO: Replace this with some error code */ if (result->compcode == NPA_AQ_COMP_CTX_FAULT || result->compcode == NPA_AQ_COMP_LOCKERR || result->compcode == NPA_AQ_COMP_CTX_POISON) { if (rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NPA0)) dev_err(rvu->dev, "%s: Not able to unlock cachelines\n", __func__); } return -EBUSY; } return 0; } int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req, struct npa_aq_enq_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; int blkaddr, npalf, rc = 0; struct npa_aq_inst_s inst; struct rvu_block *block; struct admin_queue *aq; struct rvu_pfvf *pfvf; void *ctx, *mask; bool ena; pfvf = rvu_get_pfvf(rvu, pcifunc); if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize) return NPA_AF_ERR_AQ_ENQUEUE; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc); if (!pfvf->npalf || blkaddr < 0) return NPA_AF_ERR_AF_LF_INVALID; block = &hw->block[blkaddr]; aq = block->aq; if (!aq) { dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__); return NPA_AF_ERR_AQ_ENQUEUE; } npalf = rvu_get_lf(rvu, block, pcifunc, 0); if (npalf < 0) return NPA_AF_ERR_AF_LF_INVALID; memset(&inst, 0, sizeof(struct npa_aq_inst_s)); inst.cindex = req->aura_id; inst.lf = npalf; inst.ctype = req->ctype; inst.op = req->op; /* Currently we are not supporting enqueuing multiple instructions, * so always choose first entry in result memory. */ inst.res_addr = (u64)aq->res->iova; /* Hardware uses same aq->res->base for updating result of * previous instruction hence wait here till it is done. 
*/ spin_lock(&aq->lock); /* Clean result + context memory */ memset(aq->res->base, 0, aq->res->entry_sz); /* Context needs to be written at RES_ADDR + 128 */ ctx = aq->res->base + 128; /* Mask needs to be written at RES_ADDR + 256 */ mask = aq->res->base + 256; switch (req->op) { case NPA_AQ_INSTOP_WRITE: /* Copy context and write mask */ if (req->ctype == NPA_AQ_CTYPE_AURA) { memcpy(mask, &req->aura_mask, sizeof(struct npa_aura_s)); memcpy(ctx, &req->aura, sizeof(struct npa_aura_s)); } else { memcpy(mask, &req->pool_mask, sizeof(struct npa_pool_s)); memcpy(ctx, &req->pool, sizeof(struct npa_pool_s)); } break; case NPA_AQ_INSTOP_INIT: if (req->ctype == NPA_AQ_CTYPE_AURA) { if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) { rc = NPA_AF_ERR_AQ_FULL; break; } /* Set pool's context address */ req->aura.pool_addr = pfvf->pool_ctx->iova + (req->aura.pool_addr * pfvf->pool_ctx->entry_sz); memcpy(ctx, &req->aura, sizeof(struct npa_aura_s)); } else { /* POOL's context */ memcpy(ctx, &req->pool, sizeof(struct npa_pool_s)); } break; case NPA_AQ_INSTOP_NOP: case NPA_AQ_INSTOP_READ: case NPA_AQ_INSTOP_LOCK: case NPA_AQ_INSTOP_UNLOCK: break; default: rc = NPA_AF_ERR_AQ_FULL; break; } if (rc) { spin_unlock(&aq->lock); return rc; } /* Submit the instruction to AQ */ rc = npa_aq_enqueue_wait(rvu, block, &inst); if (rc) { spin_unlock(&aq->lock); return rc; } /* Set aura bitmap if aura hw context is enabled */ if (req->ctype == NPA_AQ_CTYPE_AURA) { if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena) __set_bit(req->aura_id, pfvf->aura_bmap); if (req->op == NPA_AQ_INSTOP_WRITE) { ena = (req->aura.ena & req->aura_mask.ena) | (test_bit(req->aura_id, pfvf->aura_bmap) & ~req->aura_mask.ena); if (ena) __set_bit(req->aura_id, pfvf->aura_bmap); else __clear_bit(req->aura_id, pfvf->aura_bmap); } } /* Set pool bitmap if pool hw context is enabled */ if (req->ctype == NPA_AQ_CTYPE_POOL) { if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena) __set_bit(req->aura_id, pfvf->pool_bmap); if (req->op == NPA_AQ_INSTOP_WRITE) { ena = (req->pool.ena & req->pool_mask.ena) | (test_bit(req->aura_id, pfvf->pool_bmap) & ~req->pool_mask.ena); if (ena) __set_bit(req->aura_id, pfvf->pool_bmap); else __clear_bit(req->aura_id, pfvf->pool_bmap); } } spin_unlock(&aq->lock); if (rsp) { /* Copy read context into mailbox */ if (req->op == NPA_AQ_INSTOP_READ) { if (req->ctype == NPA_AQ_CTYPE_AURA) memcpy(&rsp->aura, ctx, sizeof(struct npa_aura_s)); else memcpy(&rsp->pool, ctx, sizeof(struct npa_pool_s)); } } return 0; } static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); struct npa_aq_enq_req aq_req; unsigned long *bmap; int id, cnt = 0; int err = 0, rc; if (!pfvf->pool_ctx || !pfvf->aura_ctx) return NPA_AF_ERR_AQ_ENQUEUE; memset(&aq_req, 0, sizeof(struct npa_aq_enq_req)); aq_req.hdr.pcifunc = req->hdr.pcifunc; if (req->ctype == NPA_AQ_CTYPE_POOL) { aq_req.pool.ena = 0; aq_req.pool_mask.ena = 1; cnt = pfvf->pool_ctx->qsize; bmap = pfvf->pool_bmap; } else if (req->ctype == NPA_AQ_CTYPE_AURA) { aq_req.aura.ena = 0; aq_req.aura_mask.ena = 1; aq_req.aura.bp_ena = 0; aq_req.aura_mask.bp_ena = 1; cnt = pfvf->aura_ctx->qsize; bmap = pfvf->aura_bmap; } aq_req.ctype = req->ctype; aq_req.op = NPA_AQ_INSTOP_WRITE; for (id = 0; id < cnt; id++) { if (!test_bit(id, bmap)) continue; aq_req.aura_id = id; rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL); if (rc) { err = rc; dev_err(rvu->dev, "Failed to disable %s:%d context\n", (req->ctype == NPA_AQ_CTYPE_AURA) ? 
"Aura" : "Pool", id); } } return err; } #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req) { struct npa_aq_enq_req lock_ctx_req; int err; if (req->op != NPA_AQ_INSTOP_INIT) return 0; memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req)); lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc; lock_ctx_req.ctype = req->ctype; lock_ctx_req.op = NPA_AQ_INSTOP_LOCK; lock_ctx_req.aura_id = req->aura_id; err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL); if (err) dev_err(rvu->dev, "PFUNC 0x%x: Failed to lock NPA context %s:%d\n", req->hdr.pcifunc, (req->ctype == NPA_AQ_CTYPE_AURA) ? "Aura" : "Pool", req->aura_id); return err; } int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu, struct npa_aq_enq_req *req, struct npa_aq_enq_rsp *rsp) { int err; err = rvu_npa_aq_enq_inst(rvu, req, rsp); if (!err) err = npa_lf_hwctx_lockdown(rvu, req); return err; } #else int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu, struct npa_aq_enq_req *req, struct npa_aq_enq_rsp *rsp) { return rvu_npa_aq_enq_inst(rvu, req, rsp); } #endif int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req, struct msg_rsp *rsp) { return npa_lf_hwctx_disable(rvu, req); } static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) { kfree(pfvf->aura_bmap); pfvf->aura_bmap = NULL; qmem_free(rvu->dev, pfvf->aura_ctx); pfvf->aura_ctx = NULL; kfree(pfvf->pool_bmap); pfvf->pool_bmap = NULL; qmem_free(rvu->dev, pfvf->pool_ctx); pfvf->pool_ctx = NULL; qmem_free(rvu->dev, pfvf->npa_qints_ctx); pfvf->npa_qints_ctx = NULL; } int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu, struct npa_lf_alloc_req *req, struct npa_lf_alloc_rsp *rsp) { int npalf, qints, hwctx_size, err, rc = 0; struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; struct rvu_block *block; struct rvu_pfvf *pfvf; u64 cfg, ctx_cfg; int blkaddr; if (req->aura_sz > NPA_AURA_SZ_MAX || req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools) return NPA_AF_ERR_PARAM; if (req->way_mask) req->way_mask &= 0xFFFF; pfvf = rvu_get_pfvf(rvu, pcifunc); blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc); if (!pfvf->npalf || blkaddr < 0) return NPA_AF_ERR_AF_LF_INVALID; block = &hw->block[blkaddr]; npalf = rvu_get_lf(rvu, block, pcifunc, 0); if (npalf < 0) return NPA_AF_ERR_AF_LF_INVALID; /* Reset this NPA LF */ err = rvu_lf_reset(rvu, block, npalf); if (err) { dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf); return NPA_AF_ERR_LF_RESET; } ctx_cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST1); /* Alloc memory for aura HW contexts */ hwctx_size = 1UL << (ctx_cfg & 0xF); err = qmem_alloc(rvu->dev, &pfvf->aura_ctx, NPA_AURA_COUNT(req->aura_sz), hwctx_size); if (err) goto free_mem; pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long), GFP_KERNEL); if (!pfvf->aura_bmap) goto free_mem; /* Alloc memory for pool HW contexts */ hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF); err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size); if (err) goto free_mem; pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long), GFP_KERNEL); if (!pfvf->pool_bmap) goto free_mem; /* Get no of queue interrupts supported */ cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST); qints = (cfg >> 28) & 0xFFF; /* Alloc memory for Qints HW contexts */ hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF); err = qmem_alloc(rvu->dev, &pfvf->npa_qints_ctx, qints, hwctx_size); if (err) goto free_mem; cfg = rvu_read64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf)); /* Clear way partition mask and set aura offset to '0' */ cfg &= 
~(BIT_ULL(34) - 1); /* Set aura size & enable caching of contexts */ cfg |= (req->aura_sz << 16) | BIT_ULL(34) | req->way_mask; rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg); /* Configure aura HW context's base */ rvu_write64(rvu, blkaddr, NPA_AF_LFX_LOC_AURAS_BASE(npalf), (u64)pfvf->aura_ctx->iova); /* Enable caching of qints hw context */ rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf), BIT_ULL(36) | req->way_mask << 20); rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf), (u64)pfvf->npa_qints_ctx->iova); goto exit; free_mem: npa_ctx_free(rvu, pfvf); rc = -ENOMEM; exit: /* set stack page info */ cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST); rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF; rsp->stack_pg_bytes = cfg & 0xFF; rsp->qints = (cfg >> 28) & 0xFFF; if (!is_rvu_otx2(rvu)) { cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL); rsp->cache_lines = (cfg >> 1) & 0x3F; } return rc; } int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; struct rvu_block *block; struct rvu_pfvf *pfvf; int npalf, err; int blkaddr; pfvf = rvu_get_pfvf(rvu, pcifunc); blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc); if (!pfvf->npalf || blkaddr < 0) return NPA_AF_ERR_AF_LF_INVALID; block = &hw->block[blkaddr]; npalf = rvu_get_lf(rvu, block, pcifunc, 0); if (npalf < 0) return NPA_AF_ERR_AF_LF_INVALID; /* Reset this NPA LF */ err = rvu_lf_reset(rvu, block, npalf); if (err) { dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf); return NPA_AF_ERR_LF_RESET; } npa_ctx_free(rvu, pfvf); return 0; } static int npa_aq_init(struct rvu *rvu, struct rvu_block *block) { u64 cfg; int err; /* Set admin queue endianness */ cfg = rvu_read64(rvu, block->addr, NPA_AF_GEN_CFG); #ifdef __BIG_ENDIAN cfg |= BIT_ULL(1); rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg); #else cfg &= ~BIT_ULL(1); rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg); #endif /* Do not bypass NDC cache */ cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG); cfg &= ~0x03DULL; #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING /* Disable caching of stack pages */ cfg |= 0x10ULL; #endif rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg); /* For CN10K NPA BATCH DMA set 35 cache lines */ if (!is_rvu_otx2(rvu)) { cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL); cfg &= ~0x7EULL; cfg |= BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1); rvu_write64(rvu, block->addr, NPA_AF_BATCH_CTL, cfg); } /* Result structure can be followed by Aura/Pool context at * RES + 128bytes and a write mask at RES + 256 bytes, depending on * operation type. Alloc sufficient result memory for all operations. 
*/ err = rvu_aq_alloc(rvu, &block->aq, Q_COUNT(AQ_SIZE), sizeof(struct npa_aq_inst_s), ALIGN(sizeof(struct npa_aq_res_s), 128) + 256); if (err) return err; rvu_write64(rvu, block->addr, NPA_AF_AQ_CFG, AQ_SIZE); rvu_write64(rvu, block->addr, NPA_AF_AQ_BASE, (u64)block->aq->inst->iova); return 0; } int rvu_npa_init(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); if (blkaddr < 0) return 0; /* Initialize admin queue */ return npa_aq_init(rvu, &hw->block[blkaddr]); } void rvu_npa_freemem(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; struct rvu_block *block; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); if (blkaddr < 0) return; block = &hw->block[blkaddr]; rvu_aq_free(rvu, block->aq); } void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct hwctx_disable_req ctx_req; /* Disable all pools */ ctx_req.hdr.pcifunc = pcifunc; ctx_req.ctype = NPA_AQ_CTYPE_POOL; npa_lf_hwctx_disable(rvu, &ctx_req); /* Disable all auras */ ctx_req.ctype = NPA_AQ_CTYPE_AURA; npa_lf_hwctx_disable(rvu, &ctx_req); npa_ctx_free(rvu, pfvf); } /* Due to an Hardware errata, in some corner cases, AQ context lock * operations can result in a NDC way getting into an illegal state * of not valid but locked. * * This API solves the problem by clearing the lock bit of the NDC block. * The operation needs to be done for each line of all the NDC banks. */ int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr) { int bank, max_bank, line, max_line, err; u64 reg, ndc_af_const; /* Set the ENABLE bit(63) to '0' */ reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL); rvu_write64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, reg & GENMASK_ULL(62, 0)); /* Poll until the BUSY bits(47:32) are set to '0' */ err = rvu_poll_reg(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, GENMASK_ULL(47, 32), true); if (err) { dev_err(rvu->dev, "Timed out while polling for NDC CAM busy bits.\n"); return err; } ndc_af_const = rvu_read64(rvu, blkaddr, NDC_AF_CONST); max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const); max_line = FIELD_GET(NDC_AF_BANK_LINE_MASK, ndc_af_const); for (bank = 0; bank < max_bank; bank++) { for (line = 0; line < max_line; line++) { /* Check if 'cache line valid bit(63)' is not set * but 'cache line lock bit(60)' is set and on * success, reset the lock bit(60). */ reg = rvu_read64(rvu, blkaddr, NDC_AF_BANKX_LINEX_METADATA(bank, line)); if (!(reg & BIT_ULL(63)) && (reg & BIT_ULL(60))) { rvu_write64(rvu, blkaddr, NDC_AF_BANKX_LINEX_METADATA(bank, line), reg & ~BIT_ULL(60)); } } } return 0; }
linux-master
drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
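rvu_ndc_fix_locked_cacheline() above works around a hardware erratum by walking every NDC bank and line and clearing the lock bit (60) of any line whose valid bit (63) is not set. Below is a minimal pure-function sketch of that per-line check, assuming only the two bit positions used in the driver; the function and macro names are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define NDC_LINE_VALID	(1ULL << 63)	/* cache line valid bit */
#define NDC_LINE_LOCKED	(1ULL << 60)	/* cache line lock bit */

/* A line that is locked but not valid is in an illegal state; only in
 * that case is the lock bit cleared, mirroring the check performed on
 * NDC_AF_BANKX_LINEX_METADATA in rvu_ndc_fix_locked_cacheline().
 */
static uint64_t ndc_fix_line_metadata(uint64_t reg)
{
	if (!(reg & NDC_LINE_VALID) && (reg & NDC_LINE_LOCKED))
		reg &= ~NDC_LINE_LOCKED;
	return reg;
}

int main(void)
{
	uint64_t bad = NDC_LINE_LOCKED;			/* locked, not valid: illegal */
	uint64_t good = NDC_LINE_VALID | NDC_LINE_LOCKED;	/* legitimately locked */

	printf("bad  -> %#llx\n", (unsigned long long)ndc_fix_line_metadata(bad));
	printf("good -> %#llx\n", (unsigned long long)ndc_fix_line_metadata(good));
	return 0;
}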
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Admin Function driver * * Copyright (C) 2018 Marvell. * */ #include <linux/types.h> #include <linux/module.h> #include <linux/pci.h> #include "rvu.h" #include "cgx.h" #include "lmac_common.h" #include "rvu_reg.h" #include "rvu_trace.h" #include "rvu_npc_hash.h" struct cgx_evq_entry { struct list_head evq_node; struct cgx_link_event link_event; }; #define M(_name, _id, _fn_name, _req_type, _rsp_type) \ static struct _req_type __maybe_unused \ *otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \ { \ struct _req_type *req; \ \ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \ &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \ sizeof(struct _rsp_type)); \ if (!req) \ return NULL; \ req->hdr.sig = OTX2_MBOX_REQ_SIG; \ req->hdr.id = _id; \ trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req)); \ return req; \ } MBOX_UP_CGX_MESSAGES #undef M bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature) { u8 cgx_id, lmac_id; void *cgxd; if (!is_pf_cgxmapped(rvu, pf)) return 0; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); cgxd = rvu_cgx_pdata(cgx_id, rvu); return (cgx_features_get(cgxd) & feature); } #define CGX_OFFSET(x) ((x) * rvu->hw->lmac_per_cgx) /* Returns bitmap of mapped PFs */ static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id) { return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id]; } int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id) { unsigned long pfmap; pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id); /* Assumes only one pf mapped to a cgx lmac port */ if (!pfmap) return -ENODEV; else return find_first_bit(&pfmap, rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx); } static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id) { return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF); } void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu) { if (cgx_id >= rvu->cgx_cnt_max) return NULL; return rvu->cgx_idmap[cgx_id]; } /* Return first enabled CGX instance if none are enabled then return NULL */ void *rvu_first_cgx_pdata(struct rvu *rvu) { int first_enabled_cgx = 0; void *cgxd = NULL; for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) { cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu); if (cgxd) break; } return cgxd; } /* Based on P2X connectivity find mapped NIX block for a PF */ static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf, int cgx_id, int lmac_id) { struct rvu_pfvf *pfvf = &rvu->pf[pf]; u8 p2x; p2x = cgx_lmac_get_p2x(cgx_id, lmac_id); /* Firmware sets P2X_SELECT as either NIX0 or NIX1 */ pfvf->nix_blkaddr = BLKADDR_NIX0; if (is_rvu_supports_nix1(rvu) && p2x == CMR_P2X_SEL_NIX1) pfvf->nix_blkaddr = BLKADDR_NIX1; } static int rvu_map_cgx_lmac_pf(struct rvu *rvu) { struct npc_pkind *pkind = &rvu->hw->pkind; int cgx_cnt_max = rvu->cgx_cnt_max; int pf = PF_CGXMAP_BASE; unsigned long lmac_bmap; int size, free_pkind; int cgx, lmac, iter; int numvfs, hwvfs; if (!cgx_cnt_max) return 0; if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF) return -EINVAL; /* Alloc map table * An additional entry is required since PF id starts from 1 and * hence entry at offset 0 is invalid. 
*/ size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8); rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL); if (!rvu->pf2cgxlmac_map) return -ENOMEM; /* Initialize all entries with an invalid cgx and lmac id */ memset(rvu->pf2cgxlmac_map, 0xFF, size); /* Reverse map table */ rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev, cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64), GFP_KERNEL); if (!rvu->cgxlmac2pf_map) return -ENOMEM; rvu->cgx_mapped_pfs = 0; for (cgx = 0; cgx < cgx_cnt_max; cgx++) { if (!rvu_cgx_pdata(cgx, rvu)) continue; lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu)); for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) { lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu), iter); rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac); rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf; free_pkind = rvu_alloc_rsrc(&pkind->rsrc); pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16; rvu_map_cgx_nix_block(rvu, pf, cgx, lmac); rvu->cgx_mapped_pfs++; rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs); rvu->cgx_mapped_vfs += numvfs; pf++; } } return 0; } static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu) { struct cgx_evq_entry *qentry; unsigned long flags; int err; qentry = kmalloc(sizeof(*qentry), GFP_KERNEL); if (!qentry) return -ENOMEM; /* Lock the event queue before we read the local link status */ spin_lock_irqsave(&rvu->cgx_evq_lock, flags); err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id, &qentry->link_event.link_uinfo); qentry->link_event.cgx_id = cgx_id; qentry->link_event.lmac_id = lmac_id; if (err) { kfree(qentry); goto skip_add; } list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head); skip_add: spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags); /* start worker to process the events */ queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work); return 0; } /* This is called from interrupt context and is expected to be atomic */ static int cgx_lmac_postevent(struct cgx_link_event *event, void *data) { struct cgx_evq_entry *qentry; struct rvu *rvu = data; /* post event to the event queue */ qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC); if (!qentry) return -ENOMEM; qentry->link_event = *event; spin_lock(&rvu->cgx_evq_lock); list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head); spin_unlock(&rvu->cgx_evq_lock); /* start worker to process the events */ queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work); return 0; } static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu) { struct cgx_link_user_info *linfo; struct cgx_link_info_msg *msg; unsigned long pfmap; int err, pfid; linfo = &event->link_uinfo; pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id); if (!pfmap) { dev_err(rvu->dev, "CGX port%d:%d not mapped with PF\n", event->cgx_id, event->lmac_id); return; } do { pfid = find_first_bit(&pfmap, rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx); clear_bit(pfid, &pfmap); /* check if notification is enabled */ if (!test_bit(pfid, &rvu->pf_notify_bmap)) { dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n", event->cgx_id, event->lmac_id, linfo->link_up ? 
"UP" : "DOWN"); continue; } /* Send mbox message to PF */ msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid); if (!msg) continue; msg->link_info = *linfo; otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid); err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid); if (err) dev_warn(rvu->dev, "notification to pf %d failed\n", pfid); } while (pfmap); } static void cgx_evhandler_task(struct work_struct *work) { struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work); struct cgx_evq_entry *qentry; struct cgx_link_event *event; unsigned long flags; do { /* Dequeue an event */ spin_lock_irqsave(&rvu->cgx_evq_lock, flags); qentry = list_first_entry_or_null(&rvu->cgx_evq_head, struct cgx_evq_entry, evq_node); if (qentry) list_del(&qentry->evq_node); spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags); if (!qentry) break; /* nothing more to process */ event = &qentry->link_event; /* process event */ cgx_notify_pfs(event, rvu); kfree(qentry); } while (1); } static int cgx_lmac_event_handler_init(struct rvu *rvu) { unsigned long lmac_bmap; struct cgx_event_cb cb; int cgx, lmac, err; void *cgxd; spin_lock_init(&rvu->cgx_evq_lock); INIT_LIST_HEAD(&rvu->cgx_evq_head); INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task); rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0); if (!rvu->cgx_evh_wq) { dev_err(rvu->dev, "alloc workqueue failed"); return -ENOMEM; } cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */ cb.data = rvu; for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) { cgxd = rvu_cgx_pdata(cgx, rvu); if (!cgxd) continue; lmac_bmap = cgx_get_lmac_bmap(cgxd); for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) { err = cgx_lmac_evh_register(&cb, cgxd, lmac); if (err) dev_err(rvu->dev, "%d:%d handler register failed\n", cgx, lmac); } } return 0; } static void rvu_cgx_wq_destroy(struct rvu *rvu) { if (rvu->cgx_evh_wq) { destroy_workqueue(rvu->cgx_evh_wq); rvu->cgx_evh_wq = NULL; } } int rvu_cgx_init(struct rvu *rvu) { int cgx, err; void *cgxd; /* CGX port id starts from 0 and are not necessarily contiguous * Hence we allocate resources based on the maximum port id value. 
*/ rvu->cgx_cnt_max = cgx_get_cgxcnt_max(); if (!rvu->cgx_cnt_max) { dev_info(rvu->dev, "No CGX devices found!\n"); return 0; } rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max * sizeof(void *), GFP_KERNEL); if (!rvu->cgx_idmap) return -ENOMEM; /* Initialize the cgxdata table */ for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx); /* Map CGX LMAC interfaces to RVU PFs */ err = rvu_map_cgx_lmac_pf(rvu); if (err) return err; /* Register for CGX events */ err = cgx_lmac_event_handler_init(rvu); if (err) return err; mutex_init(&rvu->cgx_cfg_lock); /* Ensure event handler registration is completed, before * we turn on the links */ mb(); /* Do link up for all CGX ports */ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) { cgxd = rvu_cgx_pdata(cgx, rvu); if (!cgxd) continue; err = cgx_lmac_linkup_start(cgxd); if (err) dev_err(rvu->dev, "Link up process failed to start on cgx %d\n", cgx); } return 0; } int rvu_cgx_exit(struct rvu *rvu) { unsigned long lmac_bmap; int cgx, lmac; void *cgxd; for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) { cgxd = rvu_cgx_pdata(cgx, rvu); if (!cgxd) continue; lmac_bmap = cgx_get_lmac_bmap(cgxd); for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) cgx_lmac_evh_unregister(cgxd, lmac); } /* Ensure event handler unregister is completed */ mb(); rvu_cgx_wq_destroy(rvu); return 0; } /* Most of the CGX configuration is restricted to the mapped PF only, * VF's of mapped PF and other PFs are not allowed. This fn() checks * whether a PFFUNC is permitted to do the config or not. */ inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc) { if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) return false; return true; } void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable) { struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; if (!is_pf_cgxmapped(rvu, pf)) return; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); cgxd = rvu_cgx_pdata(cgx_id, rvu); mac_ops = get_mac_ops(cgxd); /* Set / clear CTL_BCK to control pause frame forwarding to NIX */ if (enable) mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true); else mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false); } int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) { int pf = rvu_get_pf(pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; if (!is_cgx_config_permitted(rvu, pcifunc)) return LMAC_AF_ERR_PERM_DENIED; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); cgxd = rvu_cgx_pdata(cgx_id, rvu); mac_ops = get_mac_ops(cgxd); return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start); } int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable) { struct mac_ops *mac_ops; mac_ops = get_mac_ops(cgxd); return mac_ops->mac_tx_enable(cgxd, lmac_id, enable); } void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc) { int pf = rvu_get_pf(pcifunc); int i = 0, lmac_count = 0; struct mac_ops *mac_ops; u8 max_dmac_filters; u8 cgx_id, lmac_id; void *cgx_dev; if (!is_cgx_config_permitted(rvu, pcifunc)) return; if (rvu_npc_exact_has_match_table(rvu)) { rvu_npc_exact_reset(rvu, pcifunc); return; } rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); cgx_dev = cgx_get_pdata(cgx_id); lmac_count = cgx_get_lmac_cnt(cgx_dev); mac_ops = get_mac_ops(cgx_dev); if (!mac_ops) return; max_dmac_filters = mac_ops->dmac_filter_count / lmac_count; for (i = 0; i < max_dmac_filters; i++) cgx_lmac_addr_del(cgx_id, lmac_id, i); /* As cgx_lmac_addr_del does not clear entry for index 0 
* so it needs to be done explicitly */ cgx_lmac_addr_reset(cgx_id, lmac_id); } int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true); return 0; } int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false); return 0; } static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req, void *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); struct mac_ops *mac_ops; int stat = 0, err = 0; u64 tx_stat, rx_stat; u8 cgx_idx, lmac; void *cgxd; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) return LMAC_AF_ERR_PERM_DENIED; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); cgxd = rvu_cgx_pdata(cgx_idx, rvu); mac_ops = get_mac_ops(cgxd); /* Rx stats */ while (stat < mac_ops->rx_stats_cnt) { err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat); if (err) return err; if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT) ((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat; else ((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat; stat++; } /* Tx stats */ stat = 0; while (stat < mac_ops->tx_stats_cnt) { err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat); if (err) return err; if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT) ((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat; else ((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat; stat++; } return 0; } int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req, struct cgx_stats_rsp *rsp) { return rvu_lmac_get_stats(rvu, req, (void *)rsp); } int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req, struct rpm_stats_rsp *rsp) { return rvu_lmac_get_stats(rvu, req, (void *)rsp); } int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu, struct msg_req *req, struct cgx_fec_stats_rsp *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); struct mac_ops *mac_ops; u8 cgx_idx, lmac; void *cgxd; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) return LMAC_AF_ERR_PERM_DENIED; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); cgxd = rvu_cgx_pdata(cgx_idx, rvu); mac_ops = get_mac_ops(cgxd); return mac_ops->get_fec_stats(cgxd, lmac, rsp); } int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) return -EPERM; if (rvu_npc_exact_has_match_table(rvu)) return rvu_npc_exact_mac_addr_set(rvu, req, rsp); rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr); return 0; } int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu, struct cgx_mac_addr_add_req *req, struct cgx_mac_addr_add_rsp *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); u8 cgx_id, lmac_id; int rc = 0; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) return -EPERM; if (rvu_npc_exact_has_match_table(rvu)) return rvu_npc_exact_mac_addr_add(rvu, req, rsp); rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr); if (rc >= 0) { rsp->index = rc; return 0; } return rc; } int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu, struct cgx_mac_addr_del_req *req, struct msg_rsp *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) return -EPERM; if (rvu_npc_exact_has_match_table(rvu)) return 
rvu_npc_exact_mac_addr_del(rvu, req, rsp); rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); return cgx_lmac_addr_del(cgx_id, lmac_id, req->index); } int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu, struct msg_req *req, struct cgx_max_dmac_entries_get_rsp *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); u8 cgx_id, lmac_id; /* If msg is received from PFs(which are not mapped to CGX LMACs) * or VF then no entries are allocated for DMAC filters at CGX level. * So returning zero. */ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) { rsp->max_dmac_filters = 0; return 0; } if (rvu_npc_exact_has_match_table(rvu)) { rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu); return 0; } rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id); return 0; } int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); u8 cgx_id, lmac_id; int rc = 0; u64 cfg; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) return -EPERM; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); rsp->hdr.rc = rc; cfg = cgx_lmac_addr_get(cgx_id, lmac_id); /* copy 48 bit mac address to req->mac_addr */ u64_to_ether_addr(cfg, rsp->mac_addr); return 0; } int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; int pf = rvu_get_pf(pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) return -EPERM; /* Disable drop on non hit rule */ if (rvu_npc_exact_has_match_table(rvu)) return rvu_npc_exact_promisc_enable(rvu, req->hdr.pcifunc); rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); cgx_lmac_promisc_config(cgx_id, lmac_id, true); return 0; } int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) return -EPERM; /* Disable drop on non hit rule */ if (rvu_npc_exact_has_match_table(rvu)) return rvu_npc_exact_promisc_disable(rvu, req->hdr.pcifunc); rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); cgx_lmac_promisc_config(cgx_id, lmac_id, false); return 0; } static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); int pf = rvu_get_pf(pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) return 0; /* This msg is expected only from PFs that are mapped to CGX LMACs, * if received from other PF/VF simply ACK, nothing to do. 
*/ if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf)) return -ENODEV; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); cgxd = rvu_cgx_pdata(cgx_id, rvu); mac_ops = get_mac_ops(cgxd); mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable); /* If PTP is enabled then inform NPC that packets to be * parsed by this PF will have their data shifted by 8 bytes * and if PTP is disabled then no shift is required */ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable)) return -EINVAL; /* This flag is required to clean up CGX conf if app gets killed */ pfvf->hw_rx_tstamp_en = enable; /* Inform MCS about 8B RX header */ rvu_mcs_ptp_cfg(rvu, cgx_id, lmac_id, enable); return 0; } int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc))) return -EPERM; return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true); } int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false); } static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en) { int pf = rvu_get_pf(pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, pcifunc)) return -EPERM; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); if (en) { set_bit(pf, &rvu->pf_notify_bmap); /* Send the current link status to PF */ rvu_cgx_send_link_info(cgx_id, lmac_id, rvu); } else { clear_bit(pf, &rvu->pf_notify_bmap); } return 0; } int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true); return 0; } int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false); return 0; } int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req, struct cgx_link_info_msg *rsp) { u8 cgx_id, lmac_id; int pf, err; pf = rvu_get_pf(req->hdr.pcifunc); if (!is_pf_cgxmapped(rvu, pf)) return -ENODEV; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id, &rsp->link_info); return err; } int rvu_mbox_handler_cgx_features_get(struct rvu *rvu, struct msg_req *req, struct cgx_features_info_msg *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); u8 cgx_idx, lmac; void *cgxd; if (!is_pf_cgxmapped(rvu, pf)) return 0; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); cgxd = rvu_cgx_pdata(cgx_idx, rvu); rsp->lmac_features = cgx_features_get(cgxd); return 0; } u32 rvu_cgx_get_fifolen(struct rvu *rvu) { struct mac_ops *mac_ops; u32 fifo_len; mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); fifo_len = mac_ops ? 
mac_ops->fifo_len : 0; return fifo_len; } u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac) { struct mac_ops *mac_ops; void *cgxd; cgxd = rvu_cgx_pdata(cgx, rvu); if (!cgxd) return 0; mac_ops = get_mac_ops(cgxd); if (!mac_ops->lmac_fifo_len) return 0; return mac_ops->lmac_fifo_len(cgxd, lmac); } static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en) { int pf = rvu_get_pf(pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, pcifunc)) return -EPERM; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu)); return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu), lmac_id, en); } int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true); return 0; } int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false); return 0; } int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause) { int pf = rvu_get_pf(pcifunc); u8 rx_pfc = 0, tx_pfc = 0; struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC)) return 0; /* This msg is expected only from PF/VFs that are mapped to CGX LMACs, * if received from other PF/VF simply ACK, nothing to do. */ if (!is_pf_cgxmapped(rvu, pf)) return LMAC_AF_ERR_PF_NOT_MAPPED; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); cgxd = rvu_cgx_pdata(cgx_id, rvu); mac_ops = get_mac_ops(cgxd); mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc); if (tx_pfc || rx_pfc) { dev_warn(rvu->dev, "Can not configure 802.3X flow control as PFC frames are enabled"); return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED; } mutex_lock(&rvu->rsrc_lock); if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause, pcifunc & RVU_PFVF_FUNC_MASK)) { mutex_unlock(&rvu->rsrc_lock); return LMAC_AF_ERR_PERM_DENIED; } mutex_unlock(&rvu->rsrc_lock); return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause); } int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu, struct cgx_pause_frm_cfg *req, struct cgx_pause_frm_cfg *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; int err = 0; void *cgxd; /* This msg is expected only from PF/VFs that are mapped to CGX LMACs, * if received from other PF/VF simply ACK, nothing to do. */ if (!is_pf_cgxmapped(rvu, pf)) return -ENODEV; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); cgxd = rvu_cgx_pdata(cgx_id, rvu); mac_ops = get_mac_ops(cgxd); if (req->set) err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause); else mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause); return err; } int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_pf_cgxmapped(rvu, pf)) return LMAC_AF_ERR_PF_NOT_MAPPED; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id); } /* Finds cumulative status of NIX rx/tx counters from LF of a PF and those * from its VFs as well. ie. 
NIX rx/tx counters at the CGX port level */ int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index, int rxtxflag, u64 *stat) { struct rvu_block *block; int blkaddr; u16 pcifunc; int pf, lf; *stat = 0; if (!cgxd || !rvu) return -EINVAL; pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id); if (pf < 0) return pf; /* Assumes LF of a PF and all of its VF belongs to the same * NIX block */ pcifunc = pf << RVU_PFVF_PF_SHIFT; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) return 0; block = &rvu->hw->block[blkaddr]; for (lf = 0; lf < block->lf.max; lf++) { /* Check if a lf is attached to this PF or one of its VFs */ if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc & ~RVU_PFVF_FUNC_MASK))) continue; if (rxtxflag == NIX_STATS_RX) *stat += rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(lf, index)); else *stat += rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(lf, index)); } return 0; } int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start) { struct rvu_pfvf *parent_pf, *pfvf; int cgx_users, err = 0; if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) return 0; parent_pf = &rvu->pf[rvu_get_pf(pcifunc)]; pfvf = rvu_get_pfvf(rvu, pcifunc); mutex_lock(&rvu->cgx_cfg_lock); if (start && pfvf->cgx_in_use) goto exit; /* CGX is already started hence nothing to do */ if (!start && !pfvf->cgx_in_use) goto exit; /* CGX is already stopped hence nothing to do */ if (start) { cgx_users = parent_pf->cgx_users; parent_pf->cgx_users++; } else { parent_pf->cgx_users--; cgx_users = parent_pf->cgx_users; } /* Start CGX when first of all NIXLFs is started. * Stop CGX when last of all NIXLFs is stopped. */ if (!cgx_users) { err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK, start); if (err) { dev_err(rvu->dev, "Unable to %s CGX\n", start ? "start" : "stop"); /* Revert the usage count in case of error */ parent_pf->cgx_users = start ? 
parent_pf->cgx_users - 1 : parent_pf->cgx_users + 1; goto exit; } } pfvf->cgx_in_use = start; exit: mutex_unlock(&rvu->cgx_cfg_lock); return err; } int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu, struct fec_mode *req, struct fec_mode *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_pf_cgxmapped(rvu, pf)) return -EPERM; if (req->fec == OTX2_FEC_OFF) req->fec = OTX2_FEC_NONE; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id); return 0; } int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req, struct cgx_fw_data *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!rvu->fwdata) return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED; if (!is_pf_cgxmapped(rvu, pf)) return -EPERM; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX) memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id], sizeof(struct cgx_lmac_fwdata_s)); else memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id], sizeof(struct cgx_lmac_fwdata_s)); return 0; } int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu, struct cgx_set_link_mode_req *req, struct cgx_set_link_mode_rsp *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); u8 cgx_idx, lmac; void *cgxd; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) return -EPERM; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); cgxd = rvu_cgx_pdata(cgx_idx, rvu); rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac); return 0; } int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req, struct msg_rsp *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) return LMAC_AF_ERR_PERM_DENIED; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); if (rvu_npc_exact_has_match_table(rvu)) return rvu_npc_exact_mac_addr_reset(rvu, req, rsp); return cgx_lmac_addr_reset(cgx_id, lmac_id); } int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu, struct cgx_mac_addr_update_req *req, struct cgx_mac_addr_update_rsp *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) return LMAC_AF_ERR_PERM_DENIED; if (rvu_npc_exact_has_match_table(rvu)) return rvu_npc_exact_mac_addr_update(rvu, req, rsp); rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index); } int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause, u16 pfc_en) { int pf = rvu_get_pf(pcifunc); u8 rx_8023 = 0, tx_8023 = 0; struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; /* This msg is expected only from PF/VFs that are mapped to CGX LMACs, * if received from other PF/VF simply ACK, nothing to do. 
*/ if (!is_pf_cgxmapped(rvu, pf)) return -ENODEV; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); cgxd = rvu_cgx_pdata(cgx_id, rvu); mac_ops = get_mac_ops(cgxd); mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023); if (tx_8023 || rx_8023) { dev_warn(rvu->dev, "Can not configure PFC as 802.3X pause frames are enabled"); return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED; } mutex_lock(&rvu->rsrc_lock); if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause, pcifunc & RVU_PFVF_FUNC_MASK)) { mutex_unlock(&rvu->rsrc_lock); return LMAC_AF_ERR_PERM_DENIED; } mutex_unlock(&rvu->rsrc_lock); return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en); } int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, struct cgx_pfc_cfg *req, struct cgx_pfc_rsp *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; int err; /* This msg is expected only from PF/VFs that are mapped to CGX LMACs, * if received from other PF/VF simply ACK, nothing to do. */ if (!is_pf_cgxmapped(rvu, pf)) return -ENODEV; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); cgxd = rvu_cgx_pdata(cgx_id, rvu); mac_ops = get_mac_ops(cgxd); err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause, req->pfc_en); mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause); return err; } void rvu_mac_reset(struct rvu *rvu, u16 pcifunc) { int pf = rvu_get_pf(pcifunc); struct mac_ops *mac_ops; struct cgx *cgxd; u8 cgx, lmac; if (!is_pf_cgxmapped(rvu, pf)) return; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac); cgxd = rvu_cgx_pdata(cgx, rvu); mac_ops = get_mac_ops(cgxd); if (mac_ops->mac_reset(cgxd, lmac, !is_vf(pcifunc))) dev_err(rvu->dev, "Failed to reset MAC\n"); }
linux-master
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
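The CGX/LMAC-to-PF mapping in rvu_cgx.c packs the CGX id into the upper nibble and the LMAC id into the lower nibble of one byte (see cgxlmac_id_to_bmap() above). This is a small standalone sketch of that pack/unpack round trip; the unpack helper is a hypothetical stand-in for rvu_get_cgx_lmac_id(), whose definition is not part of this file.

#include <stdint.h>
#include <stdio.h>

/* Pack exactly as cgxlmac_id_to_bmap() does: CGX id in bits 7:4,
 * LMAC id in bits 3:0.
 */
static uint8_t pack_cgx_lmac(uint8_t cgx_id, uint8_t lmac_id)
{
	return (uint8_t)(((cgx_id & 0xF) << 4) | (lmac_id & 0xF));
}

/* Hypothetical reverse helper illustrating how the map byte is split
 * back into its two ids.
 */
static void unpack_cgx_lmac(uint8_t map, uint8_t *cgx_id, uint8_t *lmac_id)
{
	*cgx_id = (map >> 4) & 0xF;
	*lmac_id = map & 0xF;
}

int main(void)
{
	uint8_t cgx, lmac, map = pack_cgx_lmac(2, 3);

	unpack_cgx_lmac(map, &cgx, &lmac);
	printf("map=%#x -> cgx=%u lmac=%u\n", map, cgx, lmac);
	return 0;
}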
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Admin Function driver * * Copyright (C) 2022 Marvell. * */ #include <linux/bitfield.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/firmware.h> #include <linux/stddef.h> #include <linux/debugfs.h> #include "rvu_struct.h" #include "rvu_reg.h" #include "rvu.h" #include "npc.h" #include "cgx.h" #include "rvu_npc_fs.h" #include "rvu_npc_hash.h" static u64 rvu_npc_wide_extract(const u64 input[], size_t start_bit, size_t width_bits) { const u64 mask = ~(u64)((~(__uint128_t)0) << width_bits); const size_t msb = start_bit + width_bits - 1; const size_t lword = start_bit >> 6; const size_t uword = msb >> 6; size_t lbits; u64 hi, lo; if (lword == uword) return (input[lword] >> (start_bit & 63)) & mask; lbits = 64 - (start_bit & 63); hi = input[uword]; lo = (input[lword] >> (start_bit & 63)); return ((hi << lbits) | lo) & mask; } static void rvu_npc_lshift_key(u64 *key, size_t key_bit_len) { u64 prev_orig_word = 0; u64 cur_orig_word = 0; size_t extra = key_bit_len % 64; size_t max_idx = key_bit_len / 64; size_t i; if (extra) max_idx++; for (i = 0; i < max_idx; i++) { cur_orig_word = key[i]; key[i] = key[i] << 1; key[i] |= ((prev_orig_word >> 63) & 0x1); prev_orig_word = cur_orig_word; } } static u32 rvu_npc_toeplitz_hash(const u64 *data, u64 *key, size_t data_bit_len, size_t key_bit_len) { u32 hash_out = 0; u64 temp_data = 0; int i; for (i = data_bit_len - 1; i >= 0; i--) { temp_data = (data[i / 64]); temp_data = temp_data >> (i % 64); temp_data &= 0x1; if (temp_data) hash_out ^= (u32)(rvu_npc_wide_extract(key, key_bit_len - 32, 32)); rvu_npc_lshift_key(key, key_bit_len); } return hash_out; } u32 npc_field_hash_calc(u64 *ldata, struct npc_get_field_hash_info_rsp rsp, u8 intf, u8 hash_idx) { u64 hash_key[3]; u64 data_padded[2]; u32 field_hash; hash_key[0] = rsp.secret_key[1] << 31; hash_key[0] |= rsp.secret_key[2]; hash_key[1] = rsp.secret_key[1] >> 33; hash_key[1] |= rsp.secret_key[0] << 31; hash_key[2] = rsp.secret_key[0] >> 33; data_padded[0] = rsp.hash_mask[intf][hash_idx][0] & ldata[0]; data_padded[1] = rsp.hash_mask[intf][hash_idx][1] & ldata[1]; field_hash = rvu_npc_toeplitz_hash(data_padded, hash_key, 128, 159); field_hash &= FIELD_GET(GENMASK(63, 32), rsp.hash_ctrl[intf][hash_idx]); field_hash += FIELD_GET(GENMASK(31, 0), rsp.hash_ctrl[intf][hash_idx]); return field_hash; } static u64 npc_update_use_hash(struct rvu *rvu, int blkaddr, u8 intf, int lid, int lt, int ld) { u8 hdr, key; u64 cfg; cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld)); hdr = FIELD_GET(NPC_HDR_OFFSET, cfg); key = FIELD_GET(NPC_KEY_OFFSET, cfg); /* Update use_hash(bit-20) to 'true' and * bytesm1(bit-16:19) to '0x3' in KEX_LD_CFG */ cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03, hdr, 0x1, 0x0, key); return cfg; } static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr, u8 intf) { struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash; int lid, lt, ld, hash_cnt = 0; if (is_npc_intf_tx(intf)) return; /* Program HASH_CFG */ for (lid = 0; lid < NPC_MAX_LID; lid++) { for (lt = 0; lt < NPC_MAX_LT; lt++) { for (ld = 0; ld < NPC_MAX_LD; ld++) { if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) { u64 cfg; if (hash_cnt == NPC_MAX_HASH) return; cfg = npc_update_use_hash(rvu, blkaddr, intf, lid, lt, ld); /* Set updated KEX configuration */ SET_KEX_LD(intf, lid, lt, ld, cfg); /* Set HASH configuration */ SET_KEX_LD_HASH(intf, ld, mkex_hash->hash[intf][ld]); SET_KEX_LD_HASH_MASK(intf, ld, 0, mkex_hash->hash_mask[intf][ld][0]); 
SET_KEX_LD_HASH_MASK(intf, ld, 1, mkex_hash->hash_mask[intf][ld][1]); SET_KEX_LD_HASH_CTRL(intf, ld, mkex_hash->hash_ctrl[intf][ld]); hash_cnt++; } } } } } static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr, u8 intf) { struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash; int lid, lt, ld, hash_cnt = 0; if (is_npc_intf_rx(intf)) return; /* Program HASH_CFG */ for (lid = 0; lid < NPC_MAX_LID; lid++) { for (lt = 0; lt < NPC_MAX_LT; lt++) { for (ld = 0; ld < NPC_MAX_LD; ld++) if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) { u64 cfg; if (hash_cnt == NPC_MAX_HASH) return; cfg = npc_update_use_hash(rvu, blkaddr, intf, lid, lt, ld); /* Set updated KEX configuration */ SET_KEX_LD(intf, lid, lt, ld, cfg); /* Set HASH configuration */ SET_KEX_LD_HASH(intf, ld, mkex_hash->hash[intf][ld]); SET_KEX_LD_HASH_MASK(intf, ld, 0, mkex_hash->hash_mask[intf][ld][0]); SET_KEX_LD_HASH_MASK(intf, ld, 1, mkex_hash->hash_mask[intf][ld][1]); SET_KEX_LD_HASH_CTRL(intf, ld, mkex_hash->hash_ctrl[intf][ld]); hash_cnt++; } } } } void npc_config_secret_key(struct rvu *rvu, int blkaddr) { struct hw_cap *hwcap = &rvu->hw->cap; struct rvu_hwinfo *hw = rvu->hw; u8 intf; if (!hwcap->npc_hash_extract) return; for (intf = 0; intf < hw->npc_intfs; intf++) { rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf), RVU_NPC_HASH_SECRET_KEY0); rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf), RVU_NPC_HASH_SECRET_KEY1); rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf), RVU_NPC_HASH_SECRET_KEY2); } } void npc_program_mkex_hash(struct rvu *rvu, int blkaddr) { struct npc_mcam_kex_hash *mh = rvu->kpu.mkex_hash; struct hw_cap *hwcap = &rvu->hw->cap; u8 intf, ld, hdr_offset, byte_len; struct rvu_hwinfo *hw = rvu->hw; u64 cfg; /* Check if hardware supports hash extraction */ if (!hwcap->npc_hash_extract) return; /* Check if IPv6 source/destination address * should be hash enabled. * Hashing reduces 128bit SIP/DIP fields to 32bit * so that 224 bit X2 key can be used for IPv6 based filters as well, * which in turn results in more number of MCAM entries available for * use. * * Hashing of IPV6 SIP/DIP is enabled in below scenarios * 1. If the silicon variant supports hashing feature * 2. If the number of bytes of IP addr being extracted is 4 bytes ie * 32bit. The assumption here is that if user wants 8bytes of LSB of * IP addr or full 16 bytes then his intention is not to use 32bit * hash. */ for (intf = 0; intf < hw->npc_intfs; intf++) { for (ld = 0; ld < NPC_MAX_LD; ld++) { cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, NPC_LID_LC, NPC_LT_LC_IP6, ld)); hdr_offset = FIELD_GET(NPC_HDR_OFFSET, cfg); byte_len = FIELD_GET(NPC_BYTESM, cfg); /* Hashing of IPv6 source/destination address should be * enabled if, * hdr_offset == 8 (offset of source IPv6 address) or * hdr_offset == 24 (offset of destination IPv6) * address) and the number of byte to be * extracted is 4. As per hardware configuration * byte_len should be == actual byte_len - 1. * Hence byte_len is checked against 3 but nor 4. 
*/ if ((hdr_offset == 8 || hdr_offset == 24) && byte_len == 3) mh->lid_lt_ld_hash_en[intf][NPC_LID_LC][NPC_LT_LC_IP6][ld] = true; } } /* Update hash configuration if the field is hash enabled */ for (intf = 0; intf < hw->npc_intfs; intf++) { npc_program_mkex_hash_rx(rvu, blkaddr, intf); npc_program_mkex_hash_tx(rvu, blkaddr, intf); } } void npc_update_field_hash(struct rvu *rvu, u8 intf, struct mcam_entry *entry, int blkaddr, u64 features, struct flow_msg *pkt, struct flow_msg *mask, struct flow_msg *opkt, struct flow_msg *omask) { struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash; struct npc_get_field_hash_info_req req; struct npc_get_field_hash_info_rsp rsp; u64 ldata[2], cfg; u32 field_hash; u8 hash_idx; if (!rvu->hw->cap.npc_hash_extract) { dev_dbg(rvu->dev, "%s: Field hash extract feature is not supported\n", __func__); return; } req.intf = intf; rvu_mbox_handler_npc_get_field_hash_info(rvu, &req, &rsp); for (hash_idx = 0; hash_idx < NPC_MAX_HASH; hash_idx++) { cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_CFG(intf, hash_idx)); if ((cfg & BIT_ULL(11)) && (cfg & BIT_ULL(12))) { u8 lid = (cfg & GENMASK_ULL(10, 8)) >> 8; u8 ltype = (cfg & GENMASK_ULL(7, 4)) >> 4; u8 ltype_mask = cfg & GENMASK_ULL(3, 0); if (mkex_hash->lid_lt_ld_hash_en[intf][lid][ltype][hash_idx]) { switch (ltype & ltype_mask) { /* If hash extract enabled is supported for IPv6 then * 128 bit IPv6 source and destination addressed * is hashed to 32 bit value. */ case NPC_LT_LC_IP6: /* ld[0] == hash_idx[0] == Source IPv6 * ld[1] == hash_idx[1] == Destination IPv6 */ if ((features & BIT_ULL(NPC_SIP_IPV6)) && !hash_idx) { u32 src_ip[IPV6_WORDS]; be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS); ldata[1] = (u64)src_ip[0] << 32 | src_ip[1]; ldata[0] = (u64)src_ip[2] << 32 | src_ip[3]; field_hash = npc_field_hash_calc(ldata, rsp, intf, hash_idx); npc_update_entry(rvu, NPC_SIP_IPV6, entry, field_hash, 0, GENMASK(31, 0), 0, intf); memcpy(&opkt->ip6src, &pkt->ip6src, sizeof(pkt->ip6src)); memcpy(&omask->ip6src, &mask->ip6src, sizeof(mask->ip6src)); } else if ((features & BIT_ULL(NPC_DIP_IPV6)) && hash_idx) { u32 dst_ip[IPV6_WORDS]; be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS); ldata[1] = (u64)dst_ip[0] << 32 | dst_ip[1]; ldata[0] = (u64)dst_ip[2] << 32 | dst_ip[3]; field_hash = npc_field_hash_calc(ldata, rsp, intf, hash_idx); npc_update_entry(rvu, NPC_DIP_IPV6, entry, field_hash, 0, GENMASK(31, 0), 0, intf); memcpy(&opkt->ip6dst, &pkt->ip6dst, sizeof(pkt->ip6dst)); memcpy(&omask->ip6dst, &mask->ip6dst, sizeof(mask->ip6dst)); } break; } } } } } int rvu_mbox_handler_npc_get_field_hash_info(struct rvu *rvu, struct npc_get_field_hash_info_req *req, struct npc_get_field_hash_info_rsp *rsp) { u64 *secret_key = rsp->secret_key; u8 intf = req->intf; int i, j, blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) { dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); return -EINVAL; } secret_key[0] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf)); secret_key[1] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf)); secret_key[2] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf)); for (i = 0; i < NPC_MAX_HASH; i++) { for (j = 0; j < NPC_MAX_HASH_MASK; j++) { rsp->hash_mask[NIX_INTF_RX][i][j] = GET_KEX_LD_HASH_MASK(NIX_INTF_RX, i, j); rsp->hash_mask[NIX_INTF_TX][i][j] = GET_KEX_LD_HASH_MASK(NIX_INTF_TX, i, j); } } for (i = 0; i < NPC_MAX_INTF; i++) for (j = 0; j < NPC_MAX_HASH; j++) rsp->hash_ctrl[i][j] = GET_KEX_LD_HASH_CTRL(i, j); return 0; } /** * 
rvu_exact_prepare_mdata - Make mdata for mcam entry * @mac: MAC address * @chan: Channel number. * @ctype: Channel Type. * @mask: LDATA mask. * Return: Meta data */ static u64 rvu_exact_prepare_mdata(u8 *mac, u16 chan, u16 ctype, u64 mask) { u64 ldata = ether_addr_to_u64(mac); /* Please note that mask is 48bit which excludes chan and ctype. * Increase mask bits if we need to include them as well. */ ldata |= ((u64)chan << 48); ldata |= ((u64)ctype << 60); ldata &= mask; ldata = ldata << 2; return ldata; } /** * rvu_exact_calculate_hash - calculate hash index to mem table. * @rvu: resource virtualization unit. * @chan: Channel number * @ctype: Channel type. * @mac: MAC address * @mask: HASH mask. * @table_depth: Depth of table. * Return: Hash value */ static u32 rvu_exact_calculate_hash(struct rvu *rvu, u16 chan, u16 ctype, u8 *mac, u64 mask, u32 table_depth) { struct npc_exact_table *table = rvu->hw->table; u64 hash_key[2]; u64 key_in[2]; u64 ldata; u32 hash; key_in[0] = RVU_NPC_HASH_SECRET_KEY0; key_in[1] = RVU_NPC_HASH_SECRET_KEY2; hash_key[0] = key_in[0] << 31; hash_key[0] |= key_in[1]; hash_key[1] = key_in[0] >> 33; ldata = rvu_exact_prepare_mdata(mac, chan, ctype, mask); dev_dbg(rvu->dev, "%s: ldata=0x%llx hash_key0=0x%llx hash_key2=0x%llx\n", __func__, ldata, hash_key[1], hash_key[0]); hash = rvu_npc_toeplitz_hash(&ldata, (u64 *)hash_key, 64, 95); hash &= table->mem_table.hash_mask; hash += table->mem_table.hash_offset; dev_dbg(rvu->dev, "%s: hash=%x\n", __func__, hash); return hash; } /** * rvu_npc_exact_alloc_mem_table_entry - find free entry in 4 way table. * @rvu: resource virtualization unit. * @way: Indicate way to table. * @index: Hash index to 4 way table. * @hash: Hash value. * * Searches 4 way table using hash index. Returns 0 on success. * Return: 0 upon success. */ static int rvu_npc_exact_alloc_mem_table_entry(struct rvu *rvu, u8 *way, u32 *index, unsigned int hash) { struct npc_exact_table *table; int depth, i; table = rvu->hw->table; depth = table->mem_table.depth; /* Check all the 4 ways for a free slot. */ mutex_lock(&table->lock); for (i = 0; i < table->mem_table.ways; i++) { if (test_bit(hash + i * depth, table->mem_table.bmap)) continue; set_bit(hash + i * depth, table->mem_table.bmap); mutex_unlock(&table->lock); dev_dbg(rvu->dev, "%s: mem table entry alloc success (way=%d index=%d)\n", __func__, i, hash); *way = i; *index = hash; return 0; } mutex_unlock(&table->lock); dev_dbg(rvu->dev, "%s: No space in 4 way exact way, weight=%u\n", __func__, bitmap_weight(table->mem_table.bmap, table->mem_table.depth)); return -ENOSPC; } /** * rvu_npc_exact_free_id - Free seq id from bitmat. * @rvu: Resource virtualization unit. * @seq_id: Sequence identifier to be freed. */ static void rvu_npc_exact_free_id(struct rvu *rvu, u32 seq_id) { struct npc_exact_table *table; table = rvu->hw->table; mutex_lock(&table->lock); clear_bit(seq_id, table->id_bmap); mutex_unlock(&table->lock); dev_dbg(rvu->dev, "%s: freed id %d\n", __func__, seq_id); } /** * rvu_npc_exact_alloc_id - Alloc seq id from bitmap. * @rvu: Resource virtualization unit. * @seq_id: Sequence identifier. * Return: True or false. 
*/ static bool rvu_npc_exact_alloc_id(struct rvu *rvu, u32 *seq_id) { struct npc_exact_table *table; u32 idx; table = rvu->hw->table; mutex_lock(&table->lock); idx = find_first_zero_bit(table->id_bmap, table->tot_ids); if (idx == table->tot_ids) { mutex_unlock(&table->lock); dev_err(rvu->dev, "%s: No space in id bitmap (%d)\n", __func__, table->tot_ids); return false; } /* Mark bit map to indicate that slot is used.*/ set_bit(idx, table->id_bmap); mutex_unlock(&table->lock); *seq_id = idx; dev_dbg(rvu->dev, "%s: Allocated id (%d)\n", __func__, *seq_id); return true; } /** * rvu_npc_exact_alloc_cam_table_entry - find free slot in fully associative table. * @rvu: resource virtualization unit. * @index: Index to exact CAM table. * Return: 0 upon success; else error number. */ static int rvu_npc_exact_alloc_cam_table_entry(struct rvu *rvu, int *index) { struct npc_exact_table *table; u32 idx; table = rvu->hw->table; mutex_lock(&table->lock); idx = find_first_zero_bit(table->cam_table.bmap, table->cam_table.depth); if (idx == table->cam_table.depth) { mutex_unlock(&table->lock); dev_info(rvu->dev, "%s: No space in exact cam table, weight=%u\n", __func__, bitmap_weight(table->cam_table.bmap, table->cam_table.depth)); return -ENOSPC; } /* Mark bit map to indicate that slot is used.*/ set_bit(idx, table->cam_table.bmap); mutex_unlock(&table->lock); *index = idx; dev_dbg(rvu->dev, "%s: cam table entry alloc success (index=%d)\n", __func__, idx); return 0; } /** * rvu_exact_prepare_table_entry - Data for exact match table entry. * @rvu: Resource virtualization unit. * @enable: Enable/Disable entry * @ctype: Software defined channel type. Currently set as 0. * @chan: Channel number. * @mac_addr: Destination mac address. * Return: mdata for exact match table. */ static u64 rvu_exact_prepare_table_entry(struct rvu *rvu, bool enable, u8 ctype, u16 chan, u8 *mac_addr) { u64 ldata = ether_addr_to_u64(mac_addr); /* Enable or disable */ u64 mdata = FIELD_PREP(GENMASK_ULL(63, 63), enable ? 1 : 0); /* Set Ctype */ mdata |= FIELD_PREP(GENMASK_ULL(61, 60), ctype); /* Set chan */ mdata |= FIELD_PREP(GENMASK_ULL(59, 48), chan); /* MAC address */ mdata |= FIELD_PREP(GENMASK_ULL(47, 0), ldata); return mdata; } /** * rvu_exact_config_secret_key - Configure secret key. * @rvu: Resource virtualization unit. */ static void rvu_exact_config_secret_key(struct rvu *rvu) { int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET0(NIX_INTF_RX), RVU_NPC_HASH_SECRET_KEY0); rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET1(NIX_INTF_RX), RVU_NPC_HASH_SECRET_KEY1); rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET2(NIX_INTF_RX), RVU_NPC_HASH_SECRET_KEY2); } /** * rvu_exact_config_search_key - Configure search key * @rvu: Resource virtualization unit. 
*/ static void rvu_exact_config_search_key(struct rvu *rvu) { int blkaddr; u64 reg_val; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); /* HDR offset */ reg_val = FIELD_PREP(GENMASK_ULL(39, 32), 0); /* BYTESM1, number of bytes - 1 */ reg_val |= FIELD_PREP(GENMASK_ULL(18, 16), ETH_ALEN - 1); /* Enable LID and set LID to NPC_LID_LA */ reg_val |= FIELD_PREP(GENMASK_ULL(11, 11), 1); reg_val |= FIELD_PREP(GENMASK_ULL(10, 8), NPC_LID_LA); /* Clear layer type based extraction */ /* Disable LT_EN */ reg_val |= FIELD_PREP(GENMASK_ULL(12, 12), 0); /* Set LTYPE_MATCH to 0 */ reg_val |= FIELD_PREP(GENMASK_ULL(7, 4), 0); /* Set LTYPE_MASK to 0 */ reg_val |= FIELD_PREP(GENMASK_ULL(3, 0), 0); rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_CFG(NIX_INTF_RX), reg_val); } /** * rvu_exact_config_result_ctrl - Set exact table hash control * @rvu: Resource virtualization unit. * @depth: Depth of Exact match table. * * Sets mask and offset for hash for mem table. */ static void rvu_exact_config_result_ctrl(struct rvu *rvu, uint32_t depth) { int blkaddr; u64 reg = 0; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); /* Set mask. Note that depth is a power of 2 */ rvu->hw->table->mem_table.hash_mask = (depth - 1); reg |= FIELD_PREP(GENMASK_ULL(42, 32), (depth - 1)); /* Set offset as 0 */ rvu->hw->table->mem_table.hash_offset = 0; reg |= FIELD_PREP(GENMASK_ULL(10, 0), 0); /* Set reg for RX */ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_RESULT_CTL(NIX_INTF_RX), reg); /* Store hash mask and offset for s/w algorithm */ } /** * rvu_exact_config_table_mask - Set exact table mask. * @rvu: Resource virtualization unit. */ static void rvu_exact_config_table_mask(struct rvu *rvu) { int blkaddr; u64 mask = 0; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); /* Don't use Ctype */ mask |= FIELD_PREP(GENMASK_ULL(61, 60), 0); /* Set chan */ mask |= GENMASK_ULL(59, 48); /* Full ldata */ mask |= GENMASK_ULL(47, 0); /* Store mask for s/w hash calcualtion */ rvu->hw->table->mem_table.mask = mask; /* Set mask for RX.*/ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_MASK(NIX_INTF_RX), mask); } /** * rvu_npc_exact_get_max_entries - Get total number of entries in table. * @rvu: resource virtualization unit. * Return: Maximum table entries possible. */ u32 rvu_npc_exact_get_max_entries(struct rvu *rvu) { struct npc_exact_table *table; table = rvu->hw->table; return table->tot_ids; } /** * rvu_npc_exact_has_match_table - Checks support for exact match. * @rvu: resource virtualization unit. * Return: True if exact match table is supported/enabled. */ bool rvu_npc_exact_has_match_table(struct rvu *rvu) { return rvu->hw->cap.npc_exact_match_enabled; } /** * __rvu_npc_exact_find_entry_by_seq_id - find entry by id * @rvu: resource virtualization unit. * @seq_id: Sequence identifier. * * Caller should acquire the lock. * Return: Pointer to table entry. */ static struct npc_exact_table_entry * __rvu_npc_exact_find_entry_by_seq_id(struct rvu *rvu, u32 seq_id) { struct npc_exact_table *table = rvu->hw->table; struct npc_exact_table_entry *entry = NULL; struct list_head *lhead; lhead = &table->lhead_gbl; /* traverse to find the matching entry */ list_for_each_entry(entry, lhead, glist) { if (entry->seq_id != seq_id) continue; return entry; } return NULL; } /** * rvu_npc_exact_add_to_list - Add entry to list * @rvu: resource virtualization unit. * @opc_type: OPCODE to select MEM/CAM table. * @ways: MEM table ways. * @index: Index in MEM/CAM table. * @cgx_id: CGX identifier. * @lmac_id: LMAC identifier. * @mac_addr: MAC address. * @chan: Channel number. 
* @ctype: Channel Type. * @seq_id: Sequence identifier * @cmd: True if function is called by ethtool cmd * @mcam_idx: NPC mcam index of DMAC entry in NPC mcam. * @pcifunc: pci function * Return: 0 upon success. */ static int rvu_npc_exact_add_to_list(struct rvu *rvu, enum npc_exact_opc_type opc_type, u8 ways, u32 index, u8 cgx_id, u8 lmac_id, u8 *mac_addr, u16 chan, u8 ctype, u32 *seq_id, bool cmd, u32 mcam_idx, u16 pcifunc) { struct npc_exact_table_entry *entry, *tmp, *iter; struct npc_exact_table *table = rvu->hw->table; struct list_head *lhead, *pprev; WARN_ON(ways >= NPC_EXACT_TBL_MAX_WAYS); if (!rvu_npc_exact_alloc_id(rvu, seq_id)) { dev_err(rvu->dev, "%s: Generate seq id failed\n", __func__); return -EFAULT; } entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) { rvu_npc_exact_free_id(rvu, *seq_id); dev_err(rvu->dev, "%s: Memory allocation failed\n", __func__); return -ENOMEM; } mutex_lock(&table->lock); switch (opc_type) { case NPC_EXACT_OPC_CAM: lhead = &table->lhead_cam_tbl_entry; table->cam_tbl_entry_cnt++; break; case NPC_EXACT_OPC_MEM: lhead = &table->lhead_mem_tbl_entry[ways]; table->mem_tbl_entry_cnt++; break; default: mutex_unlock(&table->lock); kfree(entry); rvu_npc_exact_free_id(rvu, *seq_id); dev_err(rvu->dev, "%s: Unknown opc type%d\n", __func__, opc_type); return -EINVAL; } /* Add to global list */ INIT_LIST_HEAD(&entry->glist); list_add_tail(&entry->glist, &table->lhead_gbl); INIT_LIST_HEAD(&entry->list); entry->index = index; entry->ways = ways; entry->opc_type = opc_type; entry->pcifunc = pcifunc; ether_addr_copy(entry->mac, mac_addr); entry->chan = chan; entry->ctype = ctype; entry->cgx_id = cgx_id; entry->lmac_id = lmac_id; entry->seq_id = *seq_id; entry->mcam_idx = mcam_idx; entry->cmd = cmd; pprev = lhead; /* Insert entry in ascending order of index */ list_for_each_entry_safe(iter, tmp, lhead, list) { if (index < iter->index) break; pprev = &iter->list; } /* Add to each table list */ list_add(&entry->list, pprev); mutex_unlock(&table->lock); return 0; } /** * rvu_npc_exact_mem_table_write - Wrapper for register write * @rvu: resource virtualization unit. * @blkaddr: Block address * @ways: ways for MEM table. * @index: Index in MEM * @mdata: Meta data to be written to register. */ static void rvu_npc_exact_mem_table_write(struct rvu *rvu, int blkaddr, u8 ways, u32 index, u64 mdata) { rvu_write64(rvu, blkaddr, NPC_AF_EXACT_MEM_ENTRY(ways, index), mdata); } /** * rvu_npc_exact_cam_table_write - Wrapper for register write * @rvu: resource virtualization unit. * @blkaddr: Block address * @index: Index in MEM * @mdata: Meta data to be written to register. */ static void rvu_npc_exact_cam_table_write(struct rvu *rvu, int blkaddr, u32 index, u64 mdata) { rvu_write64(rvu, blkaddr, NPC_AF_EXACT_CAM_ENTRY(index), mdata); } /** * rvu_npc_exact_dealloc_table_entry - dealloc table entry * @rvu: resource virtualization unit. * @opc_type: OPCODE for selection of table(MEM or CAM) * @ways: ways if opc_type is MEM table. * @index: Index of MEM or CAM table. * Return: 0 upon success. 
*/ static int rvu_npc_exact_dealloc_table_entry(struct rvu *rvu, enum npc_exact_opc_type opc_type, u8 ways, u32 index) { int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); struct npc_exact_table *table; u8 null_dmac[6] = { 0 }; int depth; /* Prepare entry with all fields set to zero */ u64 null_mdata = rvu_exact_prepare_table_entry(rvu, false, 0, 0, null_dmac); table = rvu->hw->table; depth = table->mem_table.depth; mutex_lock(&table->lock); switch (opc_type) { case NPC_EXACT_OPC_CAM: /* Check whether entry is used already */ if (!test_bit(index, table->cam_table.bmap)) { mutex_unlock(&table->lock); dev_err(rvu->dev, "%s: Trying to free an unused entry ways=%d index=%d\n", __func__, ways, index); return -EINVAL; } rvu_npc_exact_cam_table_write(rvu, blkaddr, index, null_mdata); clear_bit(index, table->cam_table.bmap); break; case NPC_EXACT_OPC_MEM: /* Check whether entry is used already */ if (!test_bit(index + ways * depth, table->mem_table.bmap)) { mutex_unlock(&table->lock); dev_err(rvu->dev, "%s: Trying to free an unused entry index=%d\n", __func__, index); return -EINVAL; } rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, null_mdata); clear_bit(index + ways * depth, table->mem_table.bmap); break; default: mutex_unlock(&table->lock); dev_err(rvu->dev, "%s: invalid opc type %d", __func__, opc_type); return -ENOSPC; } mutex_unlock(&table->lock); dev_dbg(rvu->dev, "%s: Successfully deleted entry (index=%d, ways=%d opc_type=%d\n", __func__, index, ways, opc_type); return 0; } /** * rvu_npc_exact_alloc_table_entry - Allociate an entry * @rvu: resource virtualization unit. * @mac: MAC address. * @chan: Channel number. * @ctype: Channel Type. * @index: Index of MEM table or CAM table. * @ways: Ways. Only valid for MEM table. * @opc_type: OPCODE to select table (MEM or CAM) * * Try allocating a slot from MEM table. If all 4 ways * slot are full for a hash index, check availability in * 32-entry CAM table for allocation. * Return: 0 upon success. */ static int rvu_npc_exact_alloc_table_entry(struct rvu *rvu, char *mac, u16 chan, u8 ctype, u32 *index, u8 *ways, enum npc_exact_opc_type *opc_type) { struct npc_exact_table *table; unsigned int hash; int err; table = rvu->hw->table; /* Check in 4-ways mem entry for free slote */ hash = rvu_exact_calculate_hash(rvu, chan, ctype, mac, table->mem_table.mask, table->mem_table.depth); err = rvu_npc_exact_alloc_mem_table_entry(rvu, ways, index, hash); if (!err) { *opc_type = NPC_EXACT_OPC_MEM; dev_dbg(rvu->dev, "%s: inserted in 4 ways hash table ways=%d, index=%d\n", __func__, *ways, *index); return 0; } dev_dbg(rvu->dev, "%s: failed to insert in 4 ways hash table\n", __func__); /* wayss is 0 for cam table */ *ways = 0; err = rvu_npc_exact_alloc_cam_table_entry(rvu, index); if (!err) { *opc_type = NPC_EXACT_OPC_CAM; dev_dbg(rvu->dev, "%s: inserted in fully associative hash table index=%u\n", __func__, *index); return 0; } dev_err(rvu->dev, "%s: failed to insert in fully associative hash table\n", __func__); return -ENOSPC; } /** * rvu_npc_exact_save_drop_rule_chan_and_mask - Save drop rules info in data base. * @rvu: resource virtualization unit. * @drop_mcam_idx: Drop rule index in NPC mcam. * @chan_val: Channel value. * @chan_mask: Channel Mask. * @pcifunc: pcifunc of interface. * Return: True upon success. 
*/ static bool rvu_npc_exact_save_drop_rule_chan_and_mask(struct rvu *rvu, int drop_mcam_idx, u64 chan_val, u64 chan_mask, u16 pcifunc) { struct npc_exact_table *table; int i; table = rvu->hw->table; for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) { if (!table->drop_rule_map[i].valid) break; if (table->drop_rule_map[i].chan_val != (u16)chan_val) continue; if (table->drop_rule_map[i].chan_mask != (u16)chan_mask) continue; return false; } if (i == NPC_MCAM_DROP_RULE_MAX) return false; table->drop_rule_map[i].drop_rule_idx = drop_mcam_idx; table->drop_rule_map[i].chan_val = (u16)chan_val; table->drop_rule_map[i].chan_mask = (u16)chan_mask; table->drop_rule_map[i].pcifunc = pcifunc; table->drop_rule_map[i].valid = true; return true; } /** * rvu_npc_exact_calc_drop_rule_chan_and_mask - Calculate Channel number and mask. * @rvu: resource virtualization unit. * @intf_type: Interface type (SDK, LBK or CGX) * @cgx_id: CGX identifier. * @lmac_id: LAMC identifier. * @val: Channel number. * @mask: Channel mask. * Return: True upon success. */ static bool rvu_npc_exact_calc_drop_rule_chan_and_mask(struct rvu *rvu, u8 intf_type, u8 cgx_id, u8 lmac_id, u64 *val, u64 *mask) { u16 chan_val, chan_mask; /* No support for SDP and LBK */ if (intf_type != NIX_INTF_TYPE_CGX) return false; chan_val = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0); chan_mask = 0xfff; if (val) *val = chan_val; if (mask) *mask = chan_mask; return true; } /** * rvu_npc_exact_drop_rule_to_pcifunc - Retrieve pcifunc * @rvu: resource virtualization unit. * @drop_rule_idx: Drop rule index in NPC mcam. * * Debugfs (exact_drop_cnt) entry displays pcifunc for interface * by retrieving the pcifunc value from data base. * Return: Drop rule index. */ u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx) { struct npc_exact_table *table; int i; table = rvu->hw->table; for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) { if (!table->drop_rule_map[i].valid) break; if (table->drop_rule_map[i].drop_rule_idx != drop_rule_idx) continue; return table->drop_rule_map[i].pcifunc; } dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n", __func__, drop_rule_idx); return -1; } /** * rvu_npc_exact_get_drop_rule_info - Get drop rule information. * @rvu: resource virtualization unit. * @intf_type: Interface type (CGX, SDP or LBK) * @cgx_id: CGX identifier. * @lmac_id: LMAC identifier. * @drop_mcam_idx: NPC mcam drop rule index. * @val: Channel value. * @mask: Channel mask. * @pcifunc: pcifunc of interface corresponding to the drop rule. * Return: True upon success. 
*/ static bool rvu_npc_exact_get_drop_rule_info(struct rvu *rvu, u8 intf_type, u8 cgx_id, u8 lmac_id, u32 *drop_mcam_idx, u64 *val, u64 *mask, u16 *pcifunc) { struct npc_exact_table *table; u64 chan_val, chan_mask; bool rc; int i; table = rvu->hw->table; if (intf_type != NIX_INTF_TYPE_CGX) { dev_err(rvu->dev, "%s: No drop rule for LBK/SDP mode\n", __func__); return false; } rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, intf_type, cgx_id, lmac_id, &chan_val, &chan_mask); if (!rc) return false; for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) { if (!table->drop_rule_map[i].valid) break; if (table->drop_rule_map[i].chan_val != (u16)chan_val) continue; if (val) *val = table->drop_rule_map[i].chan_val; if (mask) *mask = table->drop_rule_map[i].chan_mask; if (pcifunc) *pcifunc = table->drop_rule_map[i].pcifunc; *drop_mcam_idx = i; return true; } if (i == NPC_MCAM_DROP_RULE_MAX) { dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n", __func__, *drop_mcam_idx); return false; } dev_err(rvu->dev, "%s: Could not retrieve for cgx=%d, lmac=%d\n", __func__, cgx_id, lmac_id); return false; } /** * __rvu_npc_exact_cmd_rules_cnt_update - Update number dmac rules against a drop rule. * @rvu: resource virtualization unit. * @drop_mcam_idx: NPC mcam drop rule index. * @val: +1 or -1. * @enable_or_disable_cam: If no exact match rules against a drop rule, disable it. * * when first exact match entry against a drop rule is added, enable_or_disable_cam * is set to true. When last exact match entry against a drop rule is deleted, * enable_or_disable_cam is set to true. * Return: Number of rules */ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_idx, int val, bool *enable_or_disable_cam) { struct npc_exact_table *table; u16 *cnt, old_cnt; bool promisc; table = rvu->hw->table; promisc = table->promisc_mode[drop_mcam_idx]; cnt = &table->cnt_cmd_rules[drop_mcam_idx]; old_cnt = *cnt; *cnt += val; if (!enable_or_disable_cam) goto done; *enable_or_disable_cam = false; if (promisc) goto done; /* If all rules are deleted and not already in promisc mode; * disable cam */ if (!*cnt && val < 0) { *enable_or_disable_cam = true; goto done; } /* If rule got added and not already in promisc mode; enable cam */ if (!old_cnt && val > 0) { *enable_or_disable_cam = true; goto done; } done: return *cnt; } /** * rvu_npc_exact_del_table_entry_by_id - Delete and free table entry. * @rvu: resource virtualization unit. * @seq_id: Sequence identifier of the entry. * * Deletes entry from linked lists and free up slot in HW MEM or CAM * table. * Return: 0 upon success. */ static int rvu_npc_exact_del_table_entry_by_id(struct rvu *rvu, u32 seq_id) { struct npc_exact_table_entry *entry = NULL; struct npc_exact_table *table; bool disable_cam = false; u32 drop_mcam_idx = -1; int *cnt; bool rc; table = rvu->hw->table; mutex_lock(&table->lock); /* Lookup for entry which needs to be updated */ entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, seq_id); if (!entry) { dev_dbg(rvu->dev, "%s: failed to find entry for id=%d\n", __func__, seq_id); mutex_unlock(&table->lock); return -ENODATA; } cnt = (entry->opc_type == NPC_EXACT_OPC_CAM) ? 
&table->cam_tbl_entry_cnt : &table->mem_tbl_entry_cnt; /* delete from lists */ list_del_init(&entry->list); list_del_init(&entry->glist); (*cnt)--; rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, entry->cgx_id, entry->lmac_id, &drop_mcam_idx, NULL, NULL, NULL); if (!rc) { dev_dbg(rvu->dev, "%s: failed to retrieve drop info for id=0x%x\n", __func__, seq_id); mutex_unlock(&table->lock); return -ENODATA; } if (entry->cmd) __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, -1, &disable_cam); /* No dmac filter rules; disable drop on hit rule */ if (disable_cam) { rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false); dev_dbg(rvu->dev, "%s: Disabling mcam idx %d\n", __func__, drop_mcam_idx); } mutex_unlock(&table->lock); rvu_npc_exact_dealloc_table_entry(rvu, entry->opc_type, entry->ways, entry->index); rvu_npc_exact_free_id(rvu, seq_id); dev_dbg(rvu->dev, "%s: delete entry success for id=0x%x, mca=%pM\n", __func__, seq_id, entry->mac); kfree(entry); return 0; } /** * rvu_npc_exact_add_table_entry - Adds a table entry * @rvu: resource virtualization unit. * @cgx_id: cgx identifier. * @lmac_id: lmac identifier. * @mac: MAC address. * @chan: Channel number. * @ctype: Channel Type. * @seq_id: Sequence number. * @cmd: Whether it is invoked by ethtool cmd. * @mcam_idx: NPC mcam index corresponding to MAC * @pcifunc: PCI func. * * Creates a new exact match table entry in either CAM or * MEM table. * Return: 0 upon success. */ static int rvu_npc_exact_add_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id, u8 *mac, u16 chan, u8 ctype, u32 *seq_id, bool cmd, u32 mcam_idx, u16 pcifunc) { int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); enum npc_exact_opc_type opc_type; bool enable_cam = false; u32 drop_mcam_idx; u32 index; u64 mdata; bool rc; int err; u8 ways; ctype = 0; err = rvu_npc_exact_alloc_table_entry(rvu, mac, chan, ctype, &index, &ways, &opc_type); if (err) { dev_err(rvu->dev, "%s: Could not alloc in exact match table\n", __func__); return err; } /* Write mdata to table */ mdata = rvu_exact_prepare_table_entry(rvu, true, ctype, chan, mac); if (opc_type == NPC_EXACT_OPC_CAM) rvu_npc_exact_cam_table_write(rvu, blkaddr, index, mdata); else rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, mdata); /* Insert entry to linked list */ err = rvu_npc_exact_add_to_list(rvu, opc_type, ways, index, cgx_id, lmac_id, mac, chan, ctype, seq_id, cmd, mcam_idx, pcifunc); if (err) { rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index); dev_err(rvu->dev, "%s: could not add to exact match table\n", __func__); return err; } rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id, &drop_mcam_idx, NULL, NULL, NULL); if (!rc) { rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index); dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n", __func__, cgx_id, lmac_id); return -EINVAL; } if (cmd) __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 1, &enable_cam); /* First command rule; enable drop on hit rule */ if (enable_cam) { rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, true); dev_dbg(rvu->dev, "%s: Enabling mcam idx %d\n", __func__, drop_mcam_idx); } dev_dbg(rvu->dev, "%s: Successfully added entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n", __func__, index, mac, ways, opc_type); return 0; } /** * rvu_npc_exact_update_table_entry - Update exact match table. * @rvu: resource virtualization unit. * @cgx_id: CGX identifier. * @lmac_id: LMAC identifier. * @old_mac: Existing MAC address entry. 
* @new_mac: New MAC address entry. * @seq_id: Sequence identifier of the entry. * * Updates MAC address of an entry. If entry is in MEM table, new * hash value may not match with old one. * Return: 0 upon success. */ static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id, u8 *old_mac, u8 *new_mac, u32 *seq_id) { int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); struct npc_exact_table_entry *entry; struct npc_exact_table *table; u32 hash_index; u64 mdata; table = rvu->hw->table; mutex_lock(&table->lock); /* Lookup for entry which needs to be updated */ entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, *seq_id); if (!entry) { mutex_unlock(&table->lock); dev_dbg(rvu->dev, "%s: failed to find entry for cgx_id=%d lmac_id=%d old_mac=%pM\n", __func__, cgx_id, lmac_id, old_mac); return -ENODATA; } /* If entry is in mem table and new hash index is different than old * hash index, we cannot update the entry. Fail in these scenarios. */ if (entry->opc_type == NPC_EXACT_OPC_MEM) { hash_index = rvu_exact_calculate_hash(rvu, entry->chan, entry->ctype, new_mac, table->mem_table.mask, table->mem_table.depth); if (hash_index != entry->index) { dev_dbg(rvu->dev, "%s: Update failed due to index mismatch(new=0x%x, old=%x)\n", __func__, hash_index, entry->index); mutex_unlock(&table->lock); return -EINVAL; } } mdata = rvu_exact_prepare_table_entry(rvu, true, entry->ctype, entry->chan, new_mac); if (entry->opc_type == NPC_EXACT_OPC_MEM) rvu_npc_exact_mem_table_write(rvu, blkaddr, entry->ways, entry->index, mdata); else rvu_npc_exact_cam_table_write(rvu, blkaddr, entry->index, mdata); /* Update entry fields */ ether_addr_copy(entry->mac, new_mac); *seq_id = entry->seq_id; dev_dbg(rvu->dev, "%s: Successfully updated entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n", __func__, entry->index, entry->mac, entry->ways, entry->opc_type); dev_dbg(rvu->dev, "%s: Successfully updated entry (old mac=%pM new_mac=%pM\n", __func__, old_mac, new_mac); mutex_unlock(&table->lock); return 0; } /** * rvu_npc_exact_promisc_disable - Disable promiscuous mode. * @rvu: resource virtualization unit. * @pcifunc: pcifunc * * Drop rule is against each PF. We dont support DMAC filter for * VF. * Return: 0 upon success */ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc) { struct npc_exact_table *table; int pf = rvu_get_pf(pcifunc); u8 cgx_id, lmac_id; u32 drop_mcam_idx; bool *promisc; bool rc; table = rvu->hw->table; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id, &drop_mcam_idx, NULL, NULL, NULL); if (!rc) { dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n", __func__, cgx_id, lmac_id); return -EINVAL; } mutex_lock(&table->lock); promisc = &table->promisc_mode[drop_mcam_idx]; if (!*promisc) { mutex_unlock(&table->lock); dev_dbg(rvu->dev, "%s: Err Already promisc mode disabled (cgx=%d lmac=%d)\n", __func__, cgx_id, lmac_id); return LMAC_AF_ERR_INVALID_PARAM; } *promisc = false; mutex_unlock(&table->lock); /* Enable drop rule */ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, true); dev_dbg(rvu->dev, "%s: disabled promisc mode (cgx=%d lmac=%d)\n", __func__, cgx_id, lmac_id); return 0; } /** * rvu_npc_exact_promisc_enable - Enable promiscuous mode. * @rvu: resource virtualization unit. * @pcifunc: pcifunc. 
* Return: 0 upon success */ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc) { struct npc_exact_table *table; int pf = rvu_get_pf(pcifunc); u8 cgx_id, lmac_id; u32 drop_mcam_idx; bool *promisc; bool rc; table = rvu->hw->table; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id, &drop_mcam_idx, NULL, NULL, NULL); if (!rc) { dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n", __func__, cgx_id, lmac_id); return -EINVAL; } mutex_lock(&table->lock); promisc = &table->promisc_mode[drop_mcam_idx]; if (*promisc) { mutex_unlock(&table->lock); dev_dbg(rvu->dev, "%s: Already in promisc mode (cgx=%d lmac=%d)\n", __func__, cgx_id, lmac_id); return LMAC_AF_ERR_INVALID_PARAM; } *promisc = true; mutex_unlock(&table->lock); /* disable drop rule */ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false); dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d)\n", __func__, cgx_id, lmac_id); return 0; } /** * rvu_npc_exact_mac_addr_reset - Delete PF mac address. * @rvu: resource virtualization unit. * @req: Reset request * @rsp: Reset response. * Return: 0 upon success */ int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req, struct msg_rsp *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); u32 seq_id = req->index; struct rvu_pfvf *pfvf; u8 cgx_id, lmac_id; int rc; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); rc = rvu_npc_exact_del_table_entry_by_id(rvu, seq_id); if (rc) { /* TODO: how to handle this error case ? */ dev_err(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__, pfvf->mac_addr, pf); return 0; } dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d success (seq_id=%u)\n", __func__, pfvf->mac_addr, pf, seq_id); return 0; } /** * rvu_npc_exact_mac_addr_update - Update mac address field with new value. * @rvu: resource virtualization unit. * @req: Update request. * @rsp: Update response. 
* Return: 0 upon success */ int rvu_npc_exact_mac_addr_update(struct rvu *rvu, struct cgx_mac_addr_update_req *req, struct cgx_mac_addr_update_rsp *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); struct npc_exact_table_entry *entry; struct npc_exact_table *table; struct rvu_pfvf *pfvf; u32 seq_id, mcam_idx; u8 old_mac[ETH_ALEN]; u8 cgx_id, lmac_id; int rc; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) return LMAC_AF_ERR_PERM_DENIED; dev_dbg(rvu->dev, "%s: Update request for seq_id=%d, mac=%pM\n", __func__, req->index, req->mac_addr); rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); table = rvu->hw->table; mutex_lock(&table->lock); /* Lookup for entry which needs to be updated */ entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, req->index); if (!entry) { dev_err(rvu->dev, "%s: failed to find entry for id=0x%x\n", __func__, req->index); mutex_unlock(&table->lock); return LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED; } ether_addr_copy(old_mac, entry->mac); seq_id = entry->seq_id; mcam_idx = entry->mcam_idx; mutex_unlock(&table->lock); rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, old_mac, req->mac_addr, &seq_id); if (!rc) { rsp->index = seq_id; dev_dbg(rvu->dev, "%s mac:%pM (pfvf:%pM default:%pM) update to PF=%d success\n", __func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf); ether_addr_copy(pfvf->mac_addr, req->mac_addr); return 0; } /* Try deleting and adding it again */ rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index); if (rc) { /* This could be a new entry */ dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__, pfvf->mac_addr, pf); } rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr, pfvf->rx_chan_base, 0, &seq_id, true, mcam_idx, req->hdr.pcifunc); if (rc) { dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n", __func__, req->mac_addr, pf); return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED; } rsp->index = seq_id; dev_dbg(rvu->dev, "%s MAC (new:%pM, old=%pM default:%pM) del and add to PF=%d success (seq_id=%u)\n", __func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf, seq_id); ether_addr_copy(pfvf->mac_addr, req->mac_addr); return 0; } /** * rvu_npc_exact_mac_addr_add - Adds MAC address to exact match table. * @rvu: resource virtualization unit. * @req: Add request. * @rsp: Add response. * Return: 0 upon success */ int rvu_npc_exact_mac_addr_add(struct rvu *rvu, struct cgx_mac_addr_add_req *req, struct cgx_mac_addr_add_rsp *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); struct rvu_pfvf *pfvf; u8 cgx_id, lmac_id; int rc = 0; u32 seq_id; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr, pfvf->rx_chan_base, 0, &seq_id, true, -1, req->hdr.pcifunc); if (!rc) { rsp->index = seq_id; dev_dbg(rvu->dev, "%s MAC (%pM) add to PF=%d success (seq_id=%u)\n", __func__, req->mac_addr, pf, seq_id); return 0; } dev_err(rvu->dev, "%s MAC (%pM) add to PF=%d failed\n", __func__, req->mac_addr, pf); return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED; } /** * rvu_npc_exact_mac_addr_del - Delete DMAC filter * @rvu: resource virtualization unit. * @req: Delete request. * @rsp: Delete response. 
* Return: 0 upon success */ int rvu_npc_exact_mac_addr_del(struct rvu *rvu, struct cgx_mac_addr_del_req *req, struct msg_rsp *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); int rc; rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index); if (!rc) { dev_dbg(rvu->dev, "%s del to PF=%d success (seq_id=%u)\n", __func__, pf, req->index); return 0; } dev_err(rvu->dev, "%s del to PF=%d failed (seq_id=%u)\n", __func__, pf, req->index); return LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED; } /** * rvu_npc_exact_mac_addr_set - Add PF mac address to dmac filter. * @rvu: resource virtualization unit. * @req: Set request. * @rsp: Set response. * Return: 0 upon success */ int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); u32 seq_id = req->index; struct rvu_pfvf *pfvf; u8 cgx_id, lmac_id; u32 mcam_idx = -1; int rc, nixlf; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); pfvf = &rvu->pf[pf]; /* If table does not have an entry; both update entry and del table entry API * below fails. Those are not failure conditions. */ rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, pfvf->mac_addr, req->mac_addr, &seq_id); if (!rc) { rsp->index = seq_id; ether_addr_copy(pfvf->mac_addr, req->mac_addr); ether_addr_copy(rsp->mac_addr, req->mac_addr); dev_dbg(rvu->dev, "%s MAC (%pM) update to PF=%d success\n", __func__, req->mac_addr, pf); return 0; } /* Try deleting and adding it again */ rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index); if (rc) { dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__, pfvf->mac_addr, pf); } /* find mcam entry if exist */ rc = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, NULL); if (!rc) { mcam_idx = npc_get_nixlf_mcam_index(&rvu->hw->mcam, req->hdr.pcifunc, nixlf, NIXLF_UCAST_ENTRY); } rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr, pfvf->rx_chan_base, 0, &seq_id, true, mcam_idx, req->hdr.pcifunc); if (rc) { dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n", __func__, req->mac_addr, pf); return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED; } rsp->index = seq_id; ether_addr_copy(rsp->mac_addr, req->mac_addr); ether_addr_copy(pfvf->mac_addr, req->mac_addr); dev_dbg(rvu->dev, "%s MAC (%pM) del and add to PF=%d success (seq_id=%u)\n", __func__, req->mac_addr, pf, seq_id); return 0; } /** * rvu_npc_exact_can_disable_feature - Check if feature can be disabled. * @rvu: resource virtualization unit. * Return: True if exact match feature is supported. */ bool rvu_npc_exact_can_disable_feature(struct rvu *rvu) { struct npc_exact_table *table = rvu->hw->table; bool empty; if (!rvu->hw->cap.npc_exact_match_enabled) return false; mutex_lock(&table->lock); empty = list_empty(&table->lhead_gbl); mutex_unlock(&table->lock); return empty; } /** * rvu_npc_exact_disable_feature - Disable feature. * @rvu: resource virtualization unit. */ void rvu_npc_exact_disable_feature(struct rvu *rvu) { rvu->hw->cap.npc_exact_match_enabled = false; } /** * rvu_npc_exact_reset - Delete and free all entry which match pcifunc. * @rvu: resource virtualization unit. * @pcifunc: PCI func to match. 
*/ void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc) { struct npc_exact_table *table = rvu->hw->table; struct npc_exact_table_entry *tmp, *iter; u32 seq_id; mutex_lock(&table->lock); list_for_each_entry_safe(iter, tmp, &table->lhead_gbl, glist) { if (pcifunc != iter->pcifunc) continue; seq_id = iter->seq_id; dev_dbg(rvu->dev, "%s: resetting pcifun=%d seq_id=%u\n", __func__, pcifunc, seq_id); mutex_unlock(&table->lock); rvu_npc_exact_del_table_entry_by_id(rvu, seq_id); mutex_lock(&table->lock); } mutex_unlock(&table->lock); } /** * rvu_npc_exact_init - initialize exact match table * @rvu: resource virtualization unit. * * Initialize HW and SW resources to manage 4way-2K table and fully * associative 32-entry mcam table. * Return: 0 upon success. */ int rvu_npc_exact_init(struct rvu *rvu) { u64 bcast_mcast_val, bcast_mcast_mask; struct npc_exact_table *table; u64 exact_val, exact_mask; u64 chan_val, chan_mask; u8 cgx_id, lmac_id; u32 *drop_mcam_idx; u16 max_lmac_cnt; u64 npc_const3; int table_size; int blkaddr; u16 pcifunc; int err, i; u64 cfg; bool rc; /* Read NPC_AF_CONST3 and check for have exact * match functionality is present */ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) { dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); return -EINVAL; } /* Check exact match feature is supported */ npc_const3 = rvu_read64(rvu, blkaddr, NPC_AF_CONST3); if (!(npc_const3 & BIT_ULL(62))) return 0; /* Check if kex profile has enabled EXACT match nibble */ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX)); if (!(cfg & NPC_EXACT_NIBBLE_HIT)) return 0; /* Set capability to true */ rvu->hw->cap.npc_exact_match_enabled = true; table = kzalloc(sizeof(*table), GFP_KERNEL); if (!table) return -ENOMEM; dev_dbg(rvu->dev, "%s: Memory allocation for table success\n", __func__); rvu->hw->table = table; /* Read table size, ways and depth */ table->mem_table.ways = FIELD_GET(GENMASK_ULL(19, 16), npc_const3); table->mem_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3); table->cam_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3); dev_dbg(rvu->dev, "%s: NPC exact match 4way_2k table(ways=%d, depth=%d)\n", __func__, table->mem_table.ways, table->cam_table.depth); /* Check if depth of table is not a sequre of 2 * TODO: why _builtin_popcount() is not working ? */ if ((table->mem_table.depth & (table->mem_table.depth - 1)) != 0) { dev_err(rvu->dev, "%s: NPC exact match 4way_2k table depth(%d) is not square of 2\n", __func__, table->mem_table.depth); return -EINVAL; } table_size = table->mem_table.depth * table->mem_table.ways; /* Allocate bitmap for 4way 2K table */ table->mem_table.bmap = devm_bitmap_zalloc(rvu->dev, table_size, GFP_KERNEL); if (!table->mem_table.bmap) return -ENOMEM; dev_dbg(rvu->dev, "%s: Allocated bitmap for 4way 2K entry table\n", __func__); /* Allocate bitmap for 32 entry mcam */ table->cam_table.bmap = devm_bitmap_zalloc(rvu->dev, 32, GFP_KERNEL); if (!table->cam_table.bmap) return -ENOMEM; dev_dbg(rvu->dev, "%s: Allocated bitmap for 32 entry cam\n", __func__); table->tot_ids = table_size + table->cam_table.depth; table->id_bmap = devm_bitmap_zalloc(rvu->dev, table->tot_ids, GFP_KERNEL); if (!table->id_bmap) return -ENOMEM; dev_dbg(rvu->dev, "%s: Allocated bitmap for id map (total=%d)\n", __func__, table->tot_ids); /* Initialize list heads for npc_exact_table entries. * This entry is used by debugfs to show entries in * exact match table. 
*/ for (i = 0; i < NPC_EXACT_TBL_MAX_WAYS; i++) INIT_LIST_HEAD(&table->lhead_mem_tbl_entry[i]); INIT_LIST_HEAD(&table->lhead_cam_tbl_entry); INIT_LIST_HEAD(&table->lhead_gbl); mutex_init(&table->lock); rvu_exact_config_secret_key(rvu); rvu_exact_config_search_key(rvu); rvu_exact_config_table_mask(rvu); rvu_exact_config_result_ctrl(rvu, table->mem_table.depth); /* - No drop rule for LBK * - Drop rules for SDP and each LMAC. */ exact_val = !NPC_EXACT_RESULT_HIT; exact_mask = NPC_EXACT_RESULT_HIT; /* nibble - 3 2 1 0 * L3B L3M L2B L2M */ bcast_mcast_val = 0b0000; bcast_mcast_mask = 0b0011; /* Install SDP drop rule */ drop_mcam_idx = &table->num_drop_rules; max_lmac_cnt = rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx + PF_CGXMAP_BASE; for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) { if (rvu->pf2cgxlmac_map[i] == 0xFF) continue; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[i], &cgx_id, &lmac_id); rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id, &chan_val, &chan_mask); if (!rc) { dev_err(rvu->dev, "%s: failed, info chan_val=0x%llx chan_mask=0x%llx rule_id=%d\n", __func__, chan_val, chan_mask, *drop_mcam_idx); return -EINVAL; } /* Filter rules are only for PF */ pcifunc = RVU_PFFUNC(i, 0); dev_dbg(rvu->dev, "%s:Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx\n", __func__, cgx_id, lmac_id, chan_val, chan_mask); rc = rvu_npc_exact_save_drop_rule_chan_and_mask(rvu, table->num_drop_rules, chan_val, chan_mask, pcifunc); if (!rc) { dev_err(rvu->dev, "%s: failed to set drop info for cgx=%d, lmac=%d, chan=%llx\n", __func__, cgx_id, lmac_id, chan_val); return -EINVAL; } err = npc_install_mcam_drop_rule(rvu, *drop_mcam_idx, &table->counter_idx[*drop_mcam_idx], chan_val, chan_mask, exact_val, exact_mask, bcast_mcast_val, bcast_mcast_mask); if (err) { dev_err(rvu->dev, "failed to configure drop rule (cgx=%d lmac=%d)\n", cgx_id, lmac_id); return err; } (*drop_mcam_idx)++; } dev_info(rvu->dev, "initialized exact match table successfully\n"); return 0; }
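/*
 * Illustrative user-space sketch (not part of the driver) of the bit-serial
 * Toeplitz hash scheme used by rvu_npc_toeplitz_hash() above: for every set
 * data bit, scanned from the most significant bit down, the current top 32
 * bits of the key are XORed into the result and the key is shifted left by
 * one bit. The key/data values in main() are made-up examples; the driver
 * uses a 159-bit key for field hashing and a 95-bit key for the exact match
 * table.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t wide_extract(const uint64_t *in, size_t start, size_t width)
{
	uint64_t mask = width < 64 ? (((uint64_t)1 << width) - 1) : ~(uint64_t)0;
	size_t lword = start / 64, uword = (start + width - 1) / 64;
	uint64_t lo = in[lword] >> (start & 63);

	if (lword == uword)
		return lo & mask;
	return ((in[uword] << (64 - (start & 63))) | lo) & mask;
}

static void lshift_key(uint64_t *key, size_t key_bits)
{
	size_t words = (key_bits + 63) / 64;
	uint64_t carry = 0;

	for (size_t i = 0; i < words; i++) {
		uint64_t next = key[i] >> 63;

		key[i] = (key[i] << 1) | carry;
		carry = next;
	}
}

/* Note: the key array is modified in place, exactly as in the driver. */
static uint32_t toeplitz_hash(const uint64_t *data, uint64_t *key,
			      int data_bits, size_t key_bits)
{
	uint32_t hash = 0;

	for (int i = data_bits - 1; i >= 0; i--) {
		if ((data[i / 64] >> (i % 64)) & 1)
			hash ^= (uint32_t)wide_extract(key, key_bits - 32, 32);
		lshift_key(key, key_bits);
	}
	return hash;
}

int main(void)
{
	/* Example inputs only */
	uint64_t data[2] = { 0x0123456789abcdefULL, 0x0fedcba987654321ULL };
	uint64_t key[3] = { 0x1111222233334444ULL, 0x5555666677778888ULL,
			    0x9999aaaabbbbccccULL };

	printf("hash = 0x%08x\n", toeplitz_hash(data, key, 128, 159));
	return 0;
}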
linux-master
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
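/*
 * Illustrative user-space model (not part of the driver) of the allocation
 * policy implemented by rvu_npc_exact_alloc_table_entry() in rvu_npc_hash.c
 * above: an entry is first placed in a 4-way set-associative memory table at
 * its hash index, and only when all four ways for that index are occupied
 * does it spill into the small fully associative CAM table. The sizes below
 * follow the description in rvu_npc_exact_init() (4 ways, 2K depth assumed
 * here, 32-entry CAM); the real depth is read from NPC_AF_CONST3.
 */
#include <stdbool.h>
#include <stdio.h>

#define WAYS      4
#define DEPTH     2048
#define CAM_DEPTH 32

static bool mem_used[WAYS][DEPTH];
static bool cam_used[CAM_DEPTH];

/* Returns 0 on success and fills way/index; -1 when both tables are full. */
static int alloc_entry(unsigned int hash, bool *in_cam,
		       unsigned int *way, unsigned int *index)
{
	hash %= DEPTH;

	for (unsigned int w = 0; w < WAYS; w++) {
		if (!mem_used[w][hash]) {
			mem_used[w][hash] = true;
			*in_cam = false;
			*way = w;
			*index = hash;
			return 0;
		}
	}

	for (unsigned int i = 0; i < CAM_DEPTH; i++) {
		if (!cam_used[i]) {
			cam_used[i] = true;
			*in_cam = true;
			*way = 0;	/* way is meaningless for the CAM */
			*index = i;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	unsigned int way, index;
	bool in_cam;

	/* Six entries colliding on the same hash: four land in the memory
	 * table, the remaining two spill into the CAM.
	 */
	for (int i = 0; i < 6; i++) {
		if (alloc_entry(0x123, &in_cam, &way, &index))
			break;
		printf("entry %d -> %s way=%u index=%u\n",
		       i, in_cam ? "CAM" : "MEM", way, index);
	}
	return 0;
}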
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Admin Function Devlink * * Copyright (C) 2020 Marvell. * */ #include<linux/bitfield.h> #include "rvu.h" #include "rvu_reg.h" #include "rvu_struct.h" #include "rvu_npc_hash.h" #define DRV_NAME "octeontx2-af" static int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name) { int err; err = devlink_fmsg_pair_nest_start(fmsg, name); if (err) return err; return devlink_fmsg_obj_nest_start(fmsg); } static int rvu_report_pair_end(struct devlink_fmsg *fmsg) { int err; err = devlink_fmsg_obj_nest_end(fmsg); if (err) return err; return devlink_fmsg_pair_nest_end(fmsg); } static bool rvu_common_request_irq(struct rvu *rvu, int offset, const char *name, irq_handler_t fn) { struct rvu_devlink *rvu_dl = rvu->rvu_dl; int rc; sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name); rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0, &rvu->irq_name[offset * NAME_SIZE], rvu_dl); if (rc) dev_warn(rvu->dev, "Failed to register %s irq\n", name); else rvu->irq_allocated[offset] = true; return rvu->irq_allocated[offset]; } static void rvu_nix_intr_work(struct work_struct *work) { struct rvu_nix_health_reporters *rvu_nix_health_reporter; rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work); devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_intr_reporter, "NIX_AF_RVU Error", rvu_nix_health_reporter->nix_event_ctx); } static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq) { struct rvu_nix_event_ctx *nix_event_context; struct rvu_devlink *rvu_dl = rvu_irq; struct rvu *rvu; int blkaddr; u64 intr; rvu = rvu_dl->rvu; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); if (blkaddr < 0) return IRQ_NONE; nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx; intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT); nix_event_context->nix_af_rvu_int = intr; /* Clear interrupts */ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr); rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL); queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->intr_work); return IRQ_HANDLED; } static void rvu_nix_gen_work(struct work_struct *work) { struct rvu_nix_health_reporters *rvu_nix_health_reporter; rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work); devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_gen_reporter, "NIX_AF_GEN Error", rvu_nix_health_reporter->nix_event_ctx); } static irqreturn_t rvu_nix_af_rvu_gen_handler(int irq, void *rvu_irq) { struct rvu_nix_event_ctx *nix_event_context; struct rvu_devlink *rvu_dl = rvu_irq; struct rvu *rvu; int blkaddr; u64 intr; rvu = rvu_dl->rvu; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); if (blkaddr < 0) return IRQ_NONE; nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx; intr = rvu_read64(rvu, blkaddr, NIX_AF_GEN_INT); nix_event_context->nix_af_rvu_gen = intr; /* Clear interrupts */ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT, intr); rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL); queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->gen_work); return IRQ_HANDLED; } static void rvu_nix_err_work(struct work_struct *work) { struct rvu_nix_health_reporters *rvu_nix_health_reporter; rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work); devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_err_reporter, "NIX_AF_ERR Error", rvu_nix_health_reporter->nix_event_ctx); } static irqreturn_t rvu_nix_af_rvu_err_handler(int irq, void *rvu_irq) { struct 
rvu_nix_event_ctx *nix_event_context; struct rvu_devlink *rvu_dl = rvu_irq; struct rvu *rvu; int blkaddr; u64 intr; rvu = rvu_dl->rvu; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); if (blkaddr < 0) return IRQ_NONE; nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx; intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT); nix_event_context->nix_af_rvu_err = intr; /* Clear interrupts */ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr); rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL); queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->err_work); return IRQ_HANDLED; } static void rvu_nix_ras_work(struct work_struct *work) { struct rvu_nix_health_reporters *rvu_nix_health_reporter; rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work); devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_ras_reporter, "NIX_AF_RAS Error", rvu_nix_health_reporter->nix_event_ctx); } static irqreturn_t rvu_nix_af_rvu_ras_handler(int irq, void *rvu_irq) { struct rvu_nix_event_ctx *nix_event_context; struct rvu_devlink *rvu_dl = rvu_irq; struct rvu *rvu; int blkaddr; u64 intr; rvu = rvu_dl->rvu; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); if (blkaddr < 0) return IRQ_NONE; nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx; intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT); nix_event_context->nix_af_rvu_ras = intr; /* Clear interrupts */ rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr); rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL); queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->ras_work); return IRQ_HANDLED; } static void rvu_nix_unregister_interrupts(struct rvu *rvu) { struct rvu_devlink *rvu_dl = rvu->rvu_dl; int offs, i, blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); if (blkaddr < 0) return; offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff; if (!offs) return; rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL); rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL); rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL); rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL); if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) { free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU), rvu_dl); rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false; } for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++) if (rvu->irq_allocated[offs + i]) { free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl); rvu->irq_allocated[offs + i] = false; } } static int rvu_nix_register_interrupts(struct rvu *rvu) { int blkaddr, base; bool rc; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); if (blkaddr < 0) return blkaddr; /* Get NIX AF MSIX vectors offset. 
*/ base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff; if (!base) { dev_warn(rvu->dev, "Failed to get NIX%d NIX_AF_INT vector offsets\n", blkaddr - BLKADDR_NIX0); return 0; } /* Register and enable NIX_AF_RVU_INT interrupt */ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_RVU, "NIX_AF_RVU_INT", rvu_nix_af_rvu_intr_handler); if (!rc) goto err; rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL); /* Register and enable NIX_AF_GEN_INT interrupt */ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_GEN, "NIX_AF_GEN_INT", rvu_nix_af_rvu_gen_handler); if (!rc) goto err; rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL); /* Register and enable NIX_AF_ERR_INT interrupt */ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR, "NIX_AF_ERR_INT", rvu_nix_af_rvu_err_handler); if (!rc) goto err; rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL); /* Register and enable NIX_AF_RAS interrupt */ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON, "NIX_AF_RAS", rvu_nix_af_rvu_ras_handler); if (!rc) goto err; rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL); return 0; err: rvu_nix_unregister_interrupts(rvu); return rc; } static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx, enum nix_af_rvu_health health_reporter) { struct rvu_nix_event_ctx *nix_event_context; u64 intr_val; int err; nix_event_context = ctx; switch (health_reporter) { case NIX_AF_RVU_INTR: intr_val = nix_event_context->nix_af_rvu_int; err = rvu_report_pair_start(fmsg, "NIX_AF_RVU"); if (err) return err; err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ", nix_event_context->nix_af_rvu_int); if (err) return err; if (intr_val & BIT_ULL(0)) { err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error"); if (err) return err; } err = rvu_report_pair_end(fmsg); if (err) return err; break; case NIX_AF_RVU_GEN: intr_val = nix_event_context->nix_af_rvu_gen; err = rvu_report_pair_start(fmsg, "NIX_AF_GENERAL"); if (err) return err; err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ", nix_event_context->nix_af_rvu_gen); if (err) return err; if (intr_val & BIT_ULL(0)) { err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop"); if (err) return err; } if (intr_val & BIT_ULL(1)) { err = devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop"); if (err) return err; } if (intr_val & BIT_ULL(4)) { err = devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done"); if (err) return err; } err = rvu_report_pair_end(fmsg); if (err) return err; break; case NIX_AF_RVU_ERR: intr_val = nix_event_context->nix_af_rvu_err; err = rvu_report_pair_start(fmsg, "NIX_AF_ERR"); if (err) return err; err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ", nix_event_context->nix_af_rvu_err); if (err) return err; if (intr_val & BIT_ULL(14)) { err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read"); if (err) return err; } if (intr_val & BIT_ULL(13)) { err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write"); if (err) return err; } if (intr_val & BIT_ULL(12)) { err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error"); if (err) return err; } if (intr_val & BIT_ULL(6)) { err = devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC"); if (err) return err; } if (intr_val & BIT_ULL(5)) { err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error"); if (err) return err; } if (intr_val & BIT_ULL(4)) { err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read"); if (err) return err; } if (intr_val & BIT_ULL(3)) { err = 
devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read"); if (err) return err; } if (intr_val & BIT_ULL(2)) { err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read"); if (err) return err; } if (intr_val & BIT_ULL(1)) { err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write"); if (err) return err; } if (intr_val & BIT_ULL(0)) { err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write"); if (err) return err; } err = rvu_report_pair_end(fmsg); if (err) return err; break; case NIX_AF_RVU_RAS: intr_val = nix_event_context->nix_af_rvu_err; err = rvu_report_pair_start(fmsg, "NIX_AF_RAS"); if (err) return err; err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ", nix_event_context->nix_af_rvu_err); if (err) return err; err = devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:"); if (err) return err; if (intr_val & BIT_ULL(34)) { err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S"); if (err) return err; } if (intr_val & BIT_ULL(33)) { err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S"); if (err) return err; } if (intr_val & BIT_ULL(32)) { err = devlink_fmsg_string_put(fmsg, "\n\tHW ctx"); if (err) return err; } if (intr_val & BIT_ULL(4)) { err = devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer"); if (err) return err; } if (intr_val & BIT_ULL(3)) { err = devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer"); if (err) return err; } if (intr_val & BIT_ULL(2)) { err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer"); if (err) return err; } if (intr_val & BIT_ULL(1)) { err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer"); if (err) return err; } if (intr_val & BIT_ULL(0)) { err = devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read"); if (err) return err; } err = rvu_report_pair_end(fmsg); if (err) return err; break; default: return -EINVAL; } return 0; } static int rvu_hw_nix_intr_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *netlink_extack) { struct rvu *rvu = devlink_health_reporter_priv(reporter); struct rvu_devlink *rvu_dl = rvu->rvu_dl; struct rvu_nix_event_ctx *nix_ctx; nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx; return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_INTR) : rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_INTR); } static int rvu_hw_nix_intr_recover(struct devlink_health_reporter *reporter, void *ctx, struct netlink_ext_ack *netlink_extack) { struct rvu *rvu = devlink_health_reporter_priv(reporter); struct rvu_nix_event_ctx *nix_event_ctx = ctx; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); if (blkaddr < 0) return blkaddr; if (nix_event_ctx->nix_af_rvu_int) rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL); return 0; } static int rvu_hw_nix_gen_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *netlink_extack) { struct rvu *rvu = devlink_health_reporter_priv(reporter); struct rvu_devlink *rvu_dl = rvu->rvu_dl; struct rvu_nix_event_ctx *nix_ctx; nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx; return ctx ? 
rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_GEN) : rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_GEN); } static int rvu_hw_nix_gen_recover(struct devlink_health_reporter *reporter, void *ctx, struct netlink_ext_ack *netlink_extack) { struct rvu *rvu = devlink_health_reporter_priv(reporter); struct rvu_nix_event_ctx *nix_event_ctx = ctx; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); if (blkaddr < 0) return blkaddr; if (nix_event_ctx->nix_af_rvu_gen) rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL); return 0; } static int rvu_hw_nix_err_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *netlink_extack) { struct rvu *rvu = devlink_health_reporter_priv(reporter); struct rvu_devlink *rvu_dl = rvu->rvu_dl; struct rvu_nix_event_ctx *nix_ctx; nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx; return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_ERR) : rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_ERR); } static int rvu_hw_nix_err_recover(struct devlink_health_reporter *reporter, void *ctx, struct netlink_ext_ack *netlink_extack) { struct rvu *rvu = devlink_health_reporter_priv(reporter); struct rvu_nix_event_ctx *nix_event_ctx = ctx; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); if (blkaddr < 0) return blkaddr; if (nix_event_ctx->nix_af_rvu_err) rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL); return 0; } static int rvu_hw_nix_ras_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *netlink_extack) { struct rvu *rvu = devlink_health_reporter_priv(reporter); struct rvu_devlink *rvu_dl = rvu->rvu_dl; struct rvu_nix_event_ctx *nix_ctx; nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx; return ctx ? 
rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_RAS) : rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_RAS); } static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter, void *ctx, struct netlink_ext_ack *netlink_extack) { struct rvu *rvu = devlink_health_reporter_priv(reporter); struct rvu_nix_event_ctx *nix_event_ctx = ctx; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); if (blkaddr < 0) return blkaddr; if (nix_event_ctx->nix_af_rvu_int) rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL); return 0; } RVU_REPORTERS(hw_nix_intr); RVU_REPORTERS(hw_nix_gen); RVU_REPORTERS(hw_nix_err); RVU_REPORTERS(hw_nix_ras); static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl); static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl) { struct rvu_nix_health_reporters *rvu_reporters; struct rvu_nix_event_ctx *nix_event_context; struct rvu *rvu = rvu_dl->rvu; rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL); if (!rvu_reporters) return -ENOMEM; rvu_dl->rvu_nix_health_reporter = rvu_reporters; nix_event_context = kzalloc(sizeof(*nix_event_context), GFP_KERNEL); if (!nix_event_context) return -ENOMEM; rvu_reporters->nix_event_ctx = nix_event_context; rvu_reporters->rvu_hw_nix_intr_reporter = devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu); if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) { dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n", PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)); return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter); } rvu_reporters->rvu_hw_nix_gen_reporter = devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu); if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) { dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n", PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)); return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter); } rvu_reporters->rvu_hw_nix_err_reporter = devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu); if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) { dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n", PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter)); return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter); } rvu_reporters->rvu_hw_nix_ras_reporter = devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu); if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) { dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n", PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)); return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter); } rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq"); if (!rvu_dl->devlink_wq) goto err; INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work); INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work); INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work); INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work); return 0; err: rvu_nix_health_reporters_destroy(rvu_dl); return -ENOMEM; } static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl) { struct rvu *rvu = rvu_dl->rvu; int err; err = rvu_nix_register_reporters(rvu_dl); if (err) { dev_warn(rvu->dev, "Failed to create nix reporter, err =%d\n", err); return err; } rvu_nix_register_interrupts(rvu); return 0; } static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl) { struct rvu_nix_health_reporters *nix_reporters; struct rvu *rvu = rvu_dl->rvu; nix_reporters = rvu_dl->rvu_nix_health_reporter; if 
(!nix_reporters->rvu_hw_nix_ras_reporter) return; if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_intr_reporter)) devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_intr_reporter); if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_gen_reporter)) devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_gen_reporter); if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_err_reporter)) devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_err_reporter); if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_ras_reporter)) devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_ras_reporter); rvu_nix_unregister_interrupts(rvu); kfree(rvu_dl->rvu_nix_health_reporter->nix_event_ctx); kfree(rvu_dl->rvu_nix_health_reporter); } static void rvu_npa_intr_work(struct work_struct *work) { struct rvu_npa_health_reporters *rvu_npa_health_reporter; rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, intr_work); devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_intr_reporter, "NPA_AF_RVU Error", rvu_npa_health_reporter->npa_event_ctx); } static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq) { struct rvu_npa_event_ctx *npa_event_context; struct rvu_devlink *rvu_dl = rvu_irq; struct rvu *rvu; int blkaddr; u64 intr; rvu = rvu_dl->rvu; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); if (blkaddr < 0) return IRQ_NONE; npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx; intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT); npa_event_context->npa_af_rvu_int = intr; /* Clear interrupts */ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr); rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL); queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->intr_work); return IRQ_HANDLED; } static void rvu_npa_gen_work(struct work_struct *work) { struct rvu_npa_health_reporters *rvu_npa_health_reporter; rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, gen_work); devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_gen_reporter, "NPA_AF_GEN Error", rvu_npa_health_reporter->npa_event_ctx); } static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq) { struct rvu_npa_event_ctx *npa_event_context; struct rvu_devlink *rvu_dl = rvu_irq; struct rvu *rvu; int blkaddr; u64 intr; rvu = rvu_dl->rvu; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); if (blkaddr < 0) return IRQ_NONE; npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx; intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT); npa_event_context->npa_af_rvu_gen = intr; /* Clear interrupts */ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr); rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL); queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->gen_work); return IRQ_HANDLED; } static void rvu_npa_err_work(struct work_struct *work) { struct rvu_npa_health_reporters *rvu_npa_health_reporter; rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, err_work); devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_err_reporter, "NPA_AF_ERR Error", rvu_npa_health_reporter->npa_event_ctx); } static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq) { struct rvu_npa_event_ctx *npa_event_context; struct rvu_devlink *rvu_dl = rvu_irq; struct rvu *rvu; int blkaddr; u64 intr; rvu = rvu_dl->rvu; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); if (blkaddr < 0) return IRQ_NONE; npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx; intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT); 
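/* Latch the raw NPA_AF_ERR_INT cause bits in the event context so the deferred devlink health dump can report them; the writes below ack the interrupt and mask further NPA_AF_ERR events until the recover callback re-enables them via NPA_AF_ERR_INT_ENA_W1S. */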
npa_event_context->npa_af_rvu_err = intr; /* Clear interrupts */ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr); rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL); queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->err_work); return IRQ_HANDLED; } static void rvu_npa_ras_work(struct work_struct *work) { struct rvu_npa_health_reporters *rvu_npa_health_reporter; rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, ras_work); devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_ras_reporter, "HW NPA_AF_RAS Error reported", rvu_npa_health_reporter->npa_event_ctx); } static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq) { struct rvu_npa_event_ctx *npa_event_context; struct rvu_devlink *rvu_dl = rvu_irq; struct rvu *rvu; int blkaddr; u64 intr; rvu = rvu_dl->rvu; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); if (blkaddr < 0) return IRQ_NONE; npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx; intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS); npa_event_context->npa_af_rvu_ras = intr; /* Clear interrupts */ rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr); rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL); queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->ras_work); return IRQ_HANDLED; } static void rvu_npa_unregister_interrupts(struct rvu *rvu) { struct rvu_devlink *rvu_dl = rvu->rvu_dl; int i, offs, blkaddr; u64 reg; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); if (blkaddr < 0) return; reg = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG); offs = reg & 0x3FF; rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL); rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL); rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL); rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL); for (i = 0; i < NPA_AF_INT_VEC_CNT; i++) if (rvu->irq_allocated[offs + i]) { free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl); rvu->irq_allocated[offs + i] = false; } } static int rvu_npa_register_interrupts(struct rvu *rvu) { int blkaddr, base; bool rc; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); if (blkaddr < 0) return blkaddr; /* Get NPA AF MSIX vectors offset. 
*/ base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff; if (!base) { dev_warn(rvu->dev, "Failed to get NPA_AF_INT vector offsets\n"); return 0; } /* Register and enable NPA_AF_RVU_INT interrupt */ rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_RVU, "NPA_AF_RVU_INT", rvu_npa_af_rvu_intr_handler); if (!rc) goto err; rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL); /* Register and enable NPA_AF_GEN_INT interrupt */ rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_GEN, "NPA_AF_RVU_GEN", rvu_npa_af_gen_intr_handler); if (!rc) goto err; rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL); /* Register and enable NPA_AF_ERR_INT interrupt */ rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_AF_ERR, "NPA_AF_ERR_INT", rvu_npa_af_err_intr_handler); if (!rc) goto err; rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL); /* Register and enable NPA_AF_RAS interrupt */ rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_POISON, "NPA_AF_RAS", rvu_npa_af_ras_intr_handler); if (!rc) goto err; rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL); return 0; err: rvu_npa_unregister_interrupts(rvu); return rc; } static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx, enum npa_af_rvu_health health_reporter) { struct rvu_npa_event_ctx *npa_event_context; unsigned int alloc_dis, free_dis; u64 intr_val; int err; npa_event_context = ctx; switch (health_reporter) { case NPA_AF_RVU_GEN: intr_val = npa_event_context->npa_af_rvu_gen; err = rvu_report_pair_start(fmsg, "NPA_AF_GENERAL"); if (err) return err; err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ", npa_event_context->npa_af_rvu_gen); if (err) return err; if (intr_val & BIT_ULL(32)) { err = devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error"); if (err) return err; } free_dis = FIELD_GET(GENMASK(15, 0), intr_val); if (free_dis & BIT(NPA_INPQ_NIX0_RX)) { err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX"); if (err) return err; } if (free_dis & BIT(NPA_INPQ_NIX0_TX)) { err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX"); if (err) return err; } if (free_dis & BIT(NPA_INPQ_NIX1_RX)) { err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX"); if (err) return err; } if (free_dis & BIT(NPA_INPQ_NIX1_TX)) { err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX"); if (err) return err; } if (free_dis & BIT(NPA_INPQ_SSO)) { err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO"); if (err) return err; } if (free_dis & BIT(NPA_INPQ_TIM)) { err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM"); if (err) return err; } if (free_dis & BIT(NPA_INPQ_DPI)) { err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI"); if (err) return err; } if (free_dis & BIT(NPA_INPQ_AURA_OP)) { err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA"); if (err) return err; } alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val); if (alloc_dis & BIT(NPA_INPQ_NIX0_RX)) { err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX"); if (err) return err; } if (alloc_dis & BIT(NPA_INPQ_NIX0_TX)) { err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX"); if (err) return err; } if (alloc_dis & BIT(NPA_INPQ_NIX1_RX)) { err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX"); if (err) return err; } if (alloc_dis & BIT(NPA_INPQ_NIX1_TX)) { err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX"); if (err) return err; } if (alloc_dis & BIT(NPA_INPQ_SSO)) { err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO"); 
if (err) return err; } if (alloc_dis & BIT(NPA_INPQ_TIM)) { err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM"); if (err) return err; } if (alloc_dis & BIT(NPA_INPQ_DPI)) { err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI"); if (err) return err; } if (alloc_dis & BIT(NPA_INPQ_AURA_OP)) { err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA"); if (err) return err; } err = rvu_report_pair_end(fmsg); if (err) return err; break; case NPA_AF_RVU_ERR: err = rvu_report_pair_start(fmsg, "NPA_AF_ERR"); if (err) return err; err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ", npa_event_context->npa_af_rvu_err); if (err) return err; if (npa_event_context->npa_af_rvu_err & BIT_ULL(14)) { err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read"); if (err) return err; } if (npa_event_context->npa_af_rvu_err & BIT_ULL(13)) { err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write"); if (err) return err; } if (npa_event_context->npa_af_rvu_err & BIT_ULL(12)) { err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error"); if (err) return err; } err = rvu_report_pair_end(fmsg); if (err) return err; break; case NPA_AF_RVU_RAS: err = rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS"); if (err) return err; err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ", npa_event_context->npa_af_rvu_ras); if (err) return err; if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34)) { err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S"); if (err) return err; } if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33)) { err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S"); if (err) return err; } if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32)) { err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context"); if (err) return err; } err = rvu_report_pair_end(fmsg); if (err) return err; break; case NPA_AF_RVU_INTR: err = rvu_report_pair_start(fmsg, "NPA_AF_RVU"); if (err) return err; err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ", npa_event_context->npa_af_rvu_int); if (err) return err; if (npa_event_context->npa_af_rvu_int & BIT_ULL(0)) { err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error"); if (err) return err; } return rvu_report_pair_end(fmsg); default: return -EINVAL; } return 0; } static int rvu_hw_npa_intr_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *netlink_extack) { struct rvu *rvu = devlink_health_reporter_priv(reporter); struct rvu_devlink *rvu_dl = rvu->rvu_dl; struct rvu_npa_event_ctx *npa_ctx; npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx; return ctx ? 
rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_INTR) : rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_INTR); } static int rvu_hw_npa_intr_recover(struct devlink_health_reporter *reporter, void *ctx, struct netlink_ext_ack *netlink_extack) { struct rvu *rvu = devlink_health_reporter_priv(reporter); struct rvu_npa_event_ctx *npa_event_ctx = ctx; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); if (blkaddr < 0) return blkaddr; if (npa_event_ctx->npa_af_rvu_int) rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL); return 0; } static int rvu_hw_npa_gen_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *netlink_extack) { struct rvu *rvu = devlink_health_reporter_priv(reporter); struct rvu_devlink *rvu_dl = rvu->rvu_dl; struct rvu_npa_event_ctx *npa_ctx; npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx; return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_GEN) : rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_GEN); } static int rvu_hw_npa_gen_recover(struct devlink_health_reporter *reporter, void *ctx, struct netlink_ext_ack *netlink_extack) { struct rvu *rvu = devlink_health_reporter_priv(reporter); struct rvu_npa_event_ctx *npa_event_ctx = ctx; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); if (blkaddr < 0) return blkaddr; if (npa_event_ctx->npa_af_rvu_gen) rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL); return 0; } static int rvu_hw_npa_err_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *netlink_extack) { struct rvu *rvu = devlink_health_reporter_priv(reporter); struct rvu_devlink *rvu_dl = rvu->rvu_dl; struct rvu_npa_event_ctx *npa_ctx; npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx; return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_ERR) : rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_ERR); } static int rvu_hw_npa_err_recover(struct devlink_health_reporter *reporter, void *ctx, struct netlink_ext_ack *netlink_extack) { struct rvu *rvu = devlink_health_reporter_priv(reporter); struct rvu_npa_event_ctx *npa_event_ctx = ctx; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); if (blkaddr < 0) return blkaddr; if (npa_event_ctx->npa_af_rvu_err) rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL); return 0; } static int rvu_hw_npa_ras_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, void *ctx, struct netlink_ext_ack *netlink_extack) { struct rvu *rvu = devlink_health_reporter_priv(reporter); struct rvu_devlink *rvu_dl = rvu->rvu_dl; struct rvu_npa_event_ctx *npa_ctx; npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx; return ctx ? 
rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_RAS) : rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_RAS); } static int rvu_hw_npa_ras_recover(struct devlink_health_reporter *reporter, void *ctx, struct netlink_ext_ack *netlink_extack) { struct rvu *rvu = devlink_health_reporter_priv(reporter); struct rvu_npa_event_ctx *npa_event_ctx = ctx; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); if (blkaddr < 0) return blkaddr; if (npa_event_ctx->npa_af_rvu_ras) rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL); return 0; } RVU_REPORTERS(hw_npa_intr); RVU_REPORTERS(hw_npa_gen); RVU_REPORTERS(hw_npa_err); RVU_REPORTERS(hw_npa_ras); static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl); static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl) { struct rvu_npa_health_reporters *rvu_reporters; struct rvu_npa_event_ctx *npa_event_context; struct rvu *rvu = rvu_dl->rvu; rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL); if (!rvu_reporters) return -ENOMEM; rvu_dl->rvu_npa_health_reporter = rvu_reporters; npa_event_context = kzalloc(sizeof(*npa_event_context), GFP_KERNEL); if (!npa_event_context) return -ENOMEM; rvu_reporters->npa_event_ctx = npa_event_context; rvu_reporters->rvu_hw_npa_intr_reporter = devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_intr_reporter_ops, 0, rvu); if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) { dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n", PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)); return PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter); } rvu_reporters->rvu_hw_npa_gen_reporter = devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_gen_reporter_ops, 0, rvu); if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) { dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n", PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)); return PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter); } rvu_reporters->rvu_hw_npa_err_reporter = devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_err_reporter_ops, 0, rvu); if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) { dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n", PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter)); return PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter); } rvu_reporters->rvu_hw_npa_ras_reporter = devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_ras_reporter_ops, 0, rvu); if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) { dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n", PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)); return PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter); } rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq"); if (!rvu_dl->devlink_wq) goto err; INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work); INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work); INIT_WORK(&rvu_reporters->gen_work, rvu_npa_gen_work); INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work); return 0; err: rvu_npa_health_reporters_destroy(rvu_dl); return -ENOMEM; } static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl) { struct rvu *rvu = rvu_dl->rvu; int err; err = rvu_npa_register_reporters(rvu_dl); if (err) { dev_warn(rvu->dev, "Failed to create npa reporter, err =%d\n", err); return err; } rvu_npa_register_interrupts(rvu); return 0; } static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl) { struct rvu_npa_health_reporters *npa_reporters; struct rvu *rvu = rvu_dl->rvu; npa_reporters = rvu_dl->rvu_npa_health_reporter; if 
(!npa_reporters->rvu_hw_npa_ras_reporter) return; if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_intr_reporter)) devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_intr_reporter); if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_gen_reporter)) devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_gen_reporter); if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_err_reporter)) devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_err_reporter); if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_ras_reporter)) devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_ras_reporter); rvu_npa_unregister_interrupts(rvu); kfree(rvu_dl->rvu_npa_health_reporter->npa_event_ctx); kfree(rvu_dl->rvu_npa_health_reporter); } static int rvu_health_reporters_create(struct rvu *rvu) { struct rvu_devlink *rvu_dl; int err; rvu_dl = rvu->rvu_dl; err = rvu_npa_health_reporters_create(rvu_dl); if (err) return err; return rvu_nix_health_reporters_create(rvu_dl); } static void rvu_health_reporters_destroy(struct rvu *rvu) { struct rvu_devlink *rvu_dl; if (!rvu->rvu_dl) return; rvu_dl = rvu->rvu_dl; rvu_npa_health_reporters_destroy(rvu_dl); rvu_nix_health_reporters_destroy(rvu_dl); } /* Devlink Params APIs */ static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id, union devlink_param_value val, struct netlink_ext_ack *extack) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; int dwrr_mtu = val.vu32; struct nix_txsch *txsch; struct nix_hw *nix_hw; if (!rvu->hw->cap.nix_common_dwrr_mtu) { NL_SET_ERR_MSG_MOD(extack, "Setting DWRR_MTU is not supported on this silicon"); return -EOPNOTSUPP; } if ((dwrr_mtu > 65536 || !is_power_of_2(dwrr_mtu)) && (dwrr_mtu != 9728 && dwrr_mtu != 10240)) { NL_SET_ERR_MSG_MOD(extack, "Invalid, supported MTUs are 0,2,4,8.16,32,64....4K,8K,32K,64K and 9728, 10240"); return -EINVAL; } nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0); if (!nix_hw) return -ENODEV; txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; if (rvu_rsrc_free_count(&txsch->schq) != txsch->schq.max) { NL_SET_ERR_MSG_MOD(extack, "Changing DWRR MTU is not supported when there are active NIXLFs"); NL_SET_ERR_MSG_MOD(extack, "Make sure none of the PF/VF interfaces are initialized and retry"); return -EOPNOTSUPP; } return 0; } static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id, struct devlink_param_gset_ctx *ctx) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; u64 dwrr_mtu; dwrr_mtu = convert_bytes_to_dwrr_mtu(ctx->val.vu32); rvu_write64(rvu, BLKADDR_NIX0, nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM), dwrr_mtu); return 0; } static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id, struct devlink_param_gset_ctx *ctx) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; u64 dwrr_mtu; if (!rvu->hw->cap.nix_common_dwrr_mtu) return -EOPNOTSUPP; dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM)); ctx->val.vu32 = convert_dwrr_mtu_to_bytes(dwrr_mtu); return 0; } enum rvu_af_dl_param_id { RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU, RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE, RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT, }; static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id, struct devlink_param_gset_ctx *ctx) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; bool enabled; enabled = rvu_npc_exact_has_match_table(rvu); snprintf(ctx->val.vstr, 
sizeof(ctx->val.vstr), "%s", enabled ? "enabled" : "disabled"); return 0; } static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id, struct devlink_param_gset_ctx *ctx) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; rvu_npc_exact_disable_feature(rvu); return 0; } static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id, union devlink_param_value val, struct netlink_ext_ack *extack) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; u64 enable; if (kstrtoull(val.vstr, 10, &enable)) { NL_SET_ERR_MSG_MOD(extack, "Only 1 value is supported"); return -EINVAL; } if (enable != 1) { NL_SET_ERR_MSG_MOD(extack, "Only disabling exact match feature is supported"); return -EINVAL; } if (rvu_npc_exact_can_disable_feature(rvu)) return 0; NL_SET_ERR_MSG_MOD(extack, "Can't disable exact match feature; Please try before any configuration"); return -EFAULT; } static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32 id, struct devlink_param_gset_ctx *ctx) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; struct npc_mcam *mcam; u32 percent; mcam = &rvu->hw->mcam; percent = (mcam->hprio_count * 100) / mcam->bmap_entries; ctx->val.vu8 = (u8)percent; return 0; } static int rvu_af_dl_npc_mcam_high_zone_percent_set(struct devlink *devlink, u32 id, struct devlink_param_gset_ctx *ctx) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; struct npc_mcam *mcam; u32 percent; percent = ctx->val.vu8; mcam = &rvu->hw->mcam; mcam->hprio_count = (mcam->bmap_entries * percent) / 100; mcam->hprio_end = mcam->hprio_count; mcam->lprio_count = (mcam->bmap_entries - mcam->hprio_count) / 2; mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count; return 0; } static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink, u32 id, union devlink_param_value val, struct netlink_ext_ack *extack) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; struct npc_mcam *mcam; /* The percent of high prio zone must range from 12% to 100% of unreserved mcam space */ if (val.vu8 < 12 || val.vu8 > 100) { NL_SET_ERR_MSG_MOD(extack, "mcam high zone percent must be between 12% to 100%"); return -EINVAL; } /* Do not allow user to modify the high priority zone entries while mcam entries * have already been assigned. 
*/ mcam = &rvu->hw->mcam; if (mcam->bmap_fcnt < mcam->bmap_entries) { NL_SET_ERR_MSG_MOD(extack, "mcam entries have already been assigned, can't resize"); return -EPERM; } return 0; } static const struct devlink_param rvu_af_dl_params[] = { DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU, "dwrr_mtu", DEVLINK_PARAM_TYPE_U32, BIT(DEVLINK_PARAM_CMODE_RUNTIME), rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set, rvu_af_dl_dwrr_mtu_validate), }; static const struct devlink_param rvu_af_dl_param_exact_match[] = { DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE, "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING, BIT(DEVLINK_PARAM_CMODE_RUNTIME), rvu_af_npc_exact_feature_get, rvu_af_npc_exact_feature_disable, rvu_af_npc_exact_feature_validate), DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT, "npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8, BIT(DEVLINK_PARAM_CMODE_RUNTIME), rvu_af_dl_npc_mcam_high_zone_percent_get, rvu_af_dl_npc_mcam_high_zone_percent_set, rvu_af_dl_npc_mcam_high_zone_percent_validate), }; /* Devlink switch mode */ static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; struct rvu_switch *rswitch; rswitch = &rvu->rswitch; *mode = rswitch->mode; return 0; } static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, struct netlink_ext_ack *extack) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; struct rvu_switch *rswitch; rswitch = &rvu->rswitch; switch (mode) { case DEVLINK_ESWITCH_MODE_LEGACY: case DEVLINK_ESWITCH_MODE_SWITCHDEV: if (rswitch->mode == mode) return 0; rswitch->mode = mode; if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) rvu_switch_enable(rvu); else rvu_switch_disable(rvu); break; default: return -EINVAL; } return 0; } static const struct devlink_ops rvu_devlink_ops = { .eswitch_mode_get = rvu_devlink_eswitch_mode_get, .eswitch_mode_set = rvu_devlink_eswitch_mode_set, }; int rvu_register_dl(struct rvu *rvu) { struct rvu_devlink *rvu_dl; struct devlink *dl; int err; dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink), rvu->dev); if (!dl) { dev_warn(rvu->dev, "devlink_alloc failed\n"); return -ENOMEM; } rvu_dl = devlink_priv(dl); rvu_dl->dl = dl; rvu_dl->rvu = rvu; rvu->rvu_dl = rvu_dl; err = rvu_health_reporters_create(rvu); if (err) { dev_err(rvu->dev, "devlink health reporter creation failed with error %d\n", err); goto err_dl_health; } err = devlink_params_register(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params)); if (err) { dev_err(rvu->dev, "devlink params register failed with error %d", err); goto err_dl_health; } /* Register exact match devlink only for CN10K-B */ if (!rvu_npc_exact_has_match_table(rvu)) goto done; err = devlink_params_register(dl, rvu_af_dl_param_exact_match, ARRAY_SIZE(rvu_af_dl_param_exact_match)); if (err) { dev_err(rvu->dev, "devlink exact match params register failed with error %d", err); goto err_dl_exact_match; } done: devlink_register(dl); return 0; err_dl_exact_match: devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params)); err_dl_health: rvu_health_reporters_destroy(rvu); devlink_free(dl); return err; } void rvu_unregister_dl(struct rvu *rvu) { struct rvu_devlink *rvu_dl = rvu->rvu_dl; struct devlink *dl = rvu_dl->dl; devlink_unregister(dl); devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params)); /* Unregister exact match devlink only for CN10K-B */ if 
(rvu_npc_exact_has_match_table(rvu)) devlink_params_unregister(dl, rvu_af_dl_param_exact_match, ARRAY_SIZE(rvu_af_dl_param_exact_match)); rvu_health_reporters_destroy(rvu); devlink_free(dl); }
linux-master
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
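The rvu_devlink.c row above follows the stock devlink health-reporter pattern: an IRQ handler latches the cause register into an event context and queues a work item, the work item calls devlink_health_report(), and the reporter's dump/recover ops either format that snapshot or re-arm the masked interrupt. Below is a minimal, hedged sketch of the same pattern using only the devlink calls as they appear in the file above; the foo_* names, struct fields and the "hw_foo_intr" reporter are invented for illustration and are not part of the driver.

#include <linux/types.h>
#include <net/devlink.h>

/* Hypothetical event context: plays the role of the rvu_nix/npa event ctx above */
struct foo_event_ctx {
	u64 foo_int;	/* snapshot of the interrupt cause register */
};

static int foo_reporter_dump(struct devlink_health_reporter *reporter,
			     struct devlink_fmsg *fmsg, void *ctx,
			     struct netlink_ext_ack *extack)
{
	struct foo_event_ctx *event = ctx;

	/* ctx is NULL when userspace requests a dump with no pending report;
	 * the driver above falls back to its stored context in that case.
	 */
	if (!event)
		return 0;

	/* Format the latched cause bits for "devlink health dump show" */
	return devlink_fmsg_u64_pair_put(fmsg, "FOO interrupt reg", event->foo_int);
}

static int foo_reporter_recover(struct devlink_health_reporter *reporter,
				void *ctx, struct netlink_ext_ack *extack)
{
	/* Re-enable the interrupt the IRQ handler masked, as the
	 * rvu_hw_*_recover callbacks above do with their *_ENA_W1S writes.
	 */
	return 0;
}

static const struct devlink_health_reporter_ops foo_reporter_ops = {
	.name = "hw_foo_intr",
	.dump = foo_reporter_dump,
	.recover = foo_reporter_recover,
};

/* At probe: reporter = devlink_health_reporter_create(dl, &foo_reporter_ops, 0, priv);
 * From the deferred work item: devlink_health_report(reporter, "FOO error", &event_ctx);
 */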
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Admin Function driver * * Copyright (C) 2018 Marvell. * */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/pci.h> #include "rvu_reg.h" #include "mbox.h" #include "rvu_trace.h" static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid) { struct otx2_mbox_dev *mdev = &mbox->dev[devid]; struct mbox_hdr *tx_hdr, *rx_hdr; void *hw_mbase = mdev->hwbase; tx_hdr = hw_mbase + mbox->tx_start; rx_hdr = hw_mbase + mbox->rx_start; mdev->msg_size = 0; mdev->rsp_size = 0; tx_hdr->num_msgs = 0; tx_hdr->msg_size = 0; rx_hdr->num_msgs = 0; rx_hdr->msg_size = 0; } EXPORT_SYMBOL(__otx2_mbox_reset); void otx2_mbox_reset(struct otx2_mbox *mbox, int devid) { struct otx2_mbox_dev *mdev = &mbox->dev[devid]; spin_lock(&mdev->mbox_lock); __otx2_mbox_reset(mbox, devid); spin_unlock(&mdev->mbox_lock); } EXPORT_SYMBOL(otx2_mbox_reset); void otx2_mbox_destroy(struct otx2_mbox *mbox) { mbox->reg_base = NULL; mbox->hwbase = NULL; kfree(mbox->dev); mbox->dev = NULL; } EXPORT_SYMBOL(otx2_mbox_destroy); static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev, void *reg_base, int direction, int ndevs) { switch (direction) { case MBOX_DIR_AFPF: case MBOX_DIR_PFVF: mbox->tx_start = MBOX_DOWN_TX_START; mbox->rx_start = MBOX_DOWN_RX_START; mbox->tx_size = MBOX_DOWN_TX_SIZE; mbox->rx_size = MBOX_DOWN_RX_SIZE; break; case MBOX_DIR_PFAF: case MBOX_DIR_VFPF: mbox->tx_start = MBOX_DOWN_RX_START; mbox->rx_start = MBOX_DOWN_TX_START; mbox->tx_size = MBOX_DOWN_RX_SIZE; mbox->rx_size = MBOX_DOWN_TX_SIZE; break; case MBOX_DIR_AFPF_UP: case MBOX_DIR_PFVF_UP: mbox->tx_start = MBOX_UP_TX_START; mbox->rx_start = MBOX_UP_RX_START; mbox->tx_size = MBOX_UP_TX_SIZE; mbox->rx_size = MBOX_UP_RX_SIZE; break; case MBOX_DIR_PFAF_UP: case MBOX_DIR_VFPF_UP: mbox->tx_start = MBOX_UP_RX_START; mbox->rx_start = MBOX_UP_TX_START; mbox->tx_size = MBOX_UP_RX_SIZE; mbox->rx_size = MBOX_UP_TX_SIZE; break; default: return -ENODEV; } switch (direction) { case MBOX_DIR_AFPF: case MBOX_DIR_AFPF_UP: mbox->trigger = RVU_AF_AFPF_MBOX0; mbox->tr_shift = 4; break; case MBOX_DIR_PFAF: case MBOX_DIR_PFAF_UP: mbox->trigger = RVU_PF_PFAF_MBOX1; mbox->tr_shift = 0; break; case MBOX_DIR_PFVF: case MBOX_DIR_PFVF_UP: mbox->trigger = RVU_PF_VFX_PFVF_MBOX0; mbox->tr_shift = 12; break; case MBOX_DIR_VFPF: case MBOX_DIR_VFPF_UP: mbox->trigger = RVU_VF_VFPF_MBOX1; mbox->tr_shift = 0; break; default: return -ENODEV; } mbox->reg_base = reg_base; mbox->pdev = pdev; mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL); if (!mbox->dev) { otx2_mbox_destroy(mbox); return -ENOMEM; } mbox->ndevs = ndevs; return 0; } int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev, void *reg_base, int direction, int ndevs) { struct otx2_mbox_dev *mdev; int devid, err; err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs); if (err) return err; mbox->hwbase = hwbase; for (devid = 0; devid < ndevs; devid++) { mdev = &mbox->dev[devid]; mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE); mdev->hwbase = mdev->mbase; spin_lock_init(&mdev->mbox_lock); /* Init header to reset value */ otx2_mbox_reset(mbox, devid); } return 0; } EXPORT_SYMBOL(otx2_mbox_init); /* Initialize mailbox with the set of mailbox region addresses * in the array hwbase. 
*/ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase, struct pci_dev *pdev, void *reg_base, int direction, int ndevs, unsigned long *pf_bmap) { struct otx2_mbox_dev *mdev; int devid, err; err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs); if (err) return err; mbox->hwbase = hwbase[0]; for (devid = 0; devid < ndevs; devid++) { if (!test_bit(devid, pf_bmap)) continue; mdev = &mbox->dev[devid]; mdev->mbase = hwbase[devid]; mdev->hwbase = hwbase[devid]; spin_lock_init(&mdev->mbox_lock); /* Init header to reset value */ otx2_mbox_reset(mbox, devid); } return 0; } EXPORT_SYMBOL(otx2_mbox_regions_init); int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid) { unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT); struct otx2_mbox_dev *mdev = &mbox->dev[devid]; struct device *sender = &mbox->pdev->dev; while (!time_after(jiffies, timeout)) { if (mdev->num_msgs == mdev->msgs_acked) return 0; usleep_range(800, 1000); } dev_dbg(sender, "timed out while waiting for rsp\n"); return -EIO; } EXPORT_SYMBOL(otx2_mbox_wait_for_rsp); int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid) { struct otx2_mbox_dev *mdev = &mbox->dev[devid]; unsigned long timeout = jiffies + 1 * HZ; while (!time_after(jiffies, timeout)) { if (mdev->num_msgs == mdev->msgs_acked) return 0; cpu_relax(); } return -EIO; } EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp); void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid) { struct otx2_mbox_dev *mdev = &mbox->dev[devid]; struct mbox_hdr *tx_hdr, *rx_hdr; void *hw_mbase = mdev->hwbase; tx_hdr = hw_mbase + mbox->tx_start; rx_hdr = hw_mbase + mbox->rx_start; /* If bounce buffer is implemented copy mbox messages from * bounce buffer to hw mbox memory. */ if (mdev->mbase != hw_mbase) memcpy(hw_mbase + mbox->tx_start + msgs_offset, mdev->mbase + mbox->tx_start + msgs_offset, mdev->msg_size); spin_lock(&mdev->mbox_lock); tx_hdr->msg_size = mdev->msg_size; /* Reset header for next messages */ mdev->msg_size = 0; mdev->rsp_size = 0; mdev->msgs_acked = 0; /* Sync mbox data into memory */ smp_wmb(); /* num_msgs != 0 signals to the peer that the buffer has a number of * messages. So this should be written after writing all the messages * to the shared memory. 
*/ tx_hdr->num_msgs = mdev->num_msgs; rx_hdr->num_msgs = 0; trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size); spin_unlock(&mdev->mbox_lock); /* The interrupt should be fired after num_msgs is written * to the shared memory */ writeq(1, (void __iomem *)mbox->reg_base + (mbox->trigger | (devid << mbox->tr_shift))); } EXPORT_SYMBOL(otx2_mbox_msg_send); struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, int size, int size_rsp) { struct otx2_mbox_dev *mdev = &mbox->dev[devid]; struct mbox_msghdr *msghdr = NULL; spin_lock(&mdev->mbox_lock); size = ALIGN(size, MBOX_MSG_ALIGN); size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN); /* Check if there is space in mailbox */ if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset) goto exit; if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset) goto exit; if (mdev->msg_size == 0) mdev->num_msgs = 0; mdev->num_msgs++; msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size; /* Clear the whole msg region */ memset(msghdr, 0, size); /* Init message header with reset values */ msghdr->ver = OTX2_MBOX_VERSION; mdev->msg_size += size; mdev->rsp_size += size_rsp; msghdr->next_msgoff = mdev->msg_size + msgs_offset; exit: spin_unlock(&mdev->mbox_lock); return msghdr; } EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp); struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, struct mbox_msghdr *msg) { unsigned long imsg = mbox->tx_start + msgs_offset; unsigned long irsp = mbox->rx_start + msgs_offset; struct otx2_mbox_dev *mdev = &mbox->dev[devid]; u16 msgs; spin_lock(&mdev->mbox_lock); if (mdev->num_msgs != mdev->msgs_acked) goto error; for (msgs = 0; msgs < mdev->msgs_acked; msgs++) { struct mbox_msghdr *pmsg = mdev->mbase + imsg; struct mbox_msghdr *prsp = mdev->mbase + irsp; if (msg == pmsg) { if (pmsg->id != prsp->id) goto error; spin_unlock(&mdev->mbox_lock); return prsp; } imsg = mbox->tx_start + pmsg->next_msgoff; irsp = mbox->rx_start + prsp->next_msgoff; } error: spin_unlock(&mdev->mbox_lock); return ERR_PTR(-ENODEV); } EXPORT_SYMBOL(otx2_mbox_get_rsp); int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid) { unsigned long ireq = mbox->tx_start + msgs_offset; unsigned long irsp = mbox->rx_start + msgs_offset; struct otx2_mbox_dev *mdev = &mbox->dev[devid]; int rc = -ENODEV; u16 msgs; spin_lock(&mdev->mbox_lock); if (mdev->num_msgs != mdev->msgs_acked) goto exit; for (msgs = 0; msgs < mdev->msgs_acked; msgs++) { struct mbox_msghdr *preq = mdev->mbase + ireq; struct mbox_msghdr *prsp = mdev->mbase + irsp; if (preq->id != prsp->id) { trace_otx2_msg_check(mbox->pdev, preq->id, prsp->id, prsp->rc); goto exit; } if (prsp->rc) { rc = prsp->rc; trace_otx2_msg_check(mbox->pdev, preq->id, prsp->id, prsp->rc); goto exit; } ireq = mbox->tx_start + preq->next_msgoff; irsp = mbox->rx_start + prsp->next_msgoff; } rc = 0; exit: spin_unlock(&mdev->mbox_lock); return rc; } EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs); int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id) { struct msg_rsp *rsp; rsp = (struct msg_rsp *) otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp)); if (!rsp) return -ENOMEM; rsp->hdr.id = id; rsp->hdr.sig = OTX2_MBOX_RSP_SIG; rsp->hdr.rc = MBOX_MSG_INVALID; rsp->hdr.pcifunc = pcifunc; return 0; } EXPORT_SYMBOL(otx2_reply_invalid_msg); bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid) { struct otx2_mbox_dev *mdev = &mbox->dev[devid]; bool ret; spin_lock(&mdev->mbox_lock); ret = mdev->num_msgs != 0; spin_unlock(&mdev->mbox_lock); return ret; } 
EXPORT_SYMBOL(otx2_mbox_nonempty); const char *otx2_mbox_id2name(u16 id) { switch (id) { #define M(_name, _id, _1, _2, _3) case _id: return # _name; MBOX_MESSAGES #undef M default: return "INVALID ID"; } } EXPORT_SYMBOL(otx2_mbox_id2name); MODULE_AUTHOR("Marvell."); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/ethernet/marvell/octeontx2/af/mbox.c
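mbox.c above implements a simple request/response shared-memory mailbox: a sender reserves TX space for a request and RX space for the expected reply with otx2_mbox_alloc_msg_rsp(), rings the peer with otx2_mbox_msg_send(), waits until num_msgs == msgs_acked with otx2_mbox_wait_for_rsp(), and pairs the reply with otx2_mbox_get_rsp(). A hedged caller-side sketch follows; struct foo_req/foo_rsp and the 0x123 message id are hypothetical, and only the otx2_mbox_* helpers and mbox_msghdr fields visible above are assumed, plus the OTX2_MBOX_REQ_SIG request signature from mbox.h.

#include <linux/err.h>
#include <linux/types.h>
#include "mbox.h"	/* driver header declaring otx2_mbox_* and struct mbox_msghdr */

/* Hypothetical message pair; real messages are declared via the MBOX_MESSAGES list */
struct foo_req {
	struct mbox_msghdr hdr;
	u64 arg;
};

struct foo_rsp {
	struct mbox_msghdr hdr;
	u64 result;
};

static int foo_send_request(struct otx2_mbox *mbox, int devid, u64 arg, u64 *result)
{
	struct foo_rsp *rsp;
	struct foo_req *req;
	int err;

	/* Reserve TX space for the request and RX space for the response */
	req = (struct foo_req *)otx2_mbox_alloc_msg_rsp(mbox, devid,
							sizeof(*req), sizeof(*rsp));
	if (!req)
		return -ENOMEM;

	req->hdr.id = 0x123;			/* stand-in for a real MBOX_MSG_* id */
	req->hdr.sig = OTX2_MBOX_REQ_SIG;	/* request signature from mbox.h */
	req->arg = arg;

	/* Ring the peer's doorbell, then wait for it to ack all queued messages */
	otx2_mbox_msg_send(mbox, devid);
	err = otx2_mbox_wait_for_rsp(mbox, devid);
	if (err)
		return err;

	rsp = (struct foo_rsp *)otx2_mbox_get_rsp(mbox, devid, &req->hdr);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);
	if (rsp->hdr.rc)
		return rsp->hdr.rc;

	*result = rsp->result;
	return 0;
}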
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Admin Function driver * * Copyright (C) 2021 Marvell. * */ #include <linux/bitfield.h> #include "rvu.h" static void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool enable) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct nix_hw *nix_hw; nix_hw = get_nix_hw(rvu->hw, pfvf->nix_blkaddr); /* Enable LBK links with channel 63 for TX MCAM rule */ rvu_nix_tx_tl2_cfg(rvu, pfvf->nix_blkaddr, pcifunc, &nix_hw->txsch[NIX_TXSCH_LVL_TL2], enable); } static int rvu_switch_install_rx_rule(struct rvu *rvu, u16 pcifunc, u16 chan_mask) { struct npc_install_flow_req req = { 0 }; struct npc_install_flow_rsp rsp = { 0 }; struct rvu_pfvf *pfvf; pfvf = rvu_get_pfvf(rvu, pcifunc); /* If the pcifunc is not initialized then nothing to do. * This same function will be called again via rvu_switch_update_rules * after pcifunc is initialized. */ if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags)) return 0; ether_addr_copy(req.packet.dmac, pfvf->mac_addr); eth_broadcast_addr((u8 *)&req.mask.dmac); req.hdr.pcifunc = 0; /* AF is requester */ req.vf = pcifunc; req.features = BIT_ULL(NPC_DMAC); req.channel = pfvf->rx_chan_base; req.chan_mask = chan_mask; req.intf = pfvf->nix_rx_intf; req.op = NIX_RX_ACTION_DEFAULT; req.default_rule = 1; return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } static int rvu_switch_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry) { struct npc_install_flow_req req = { 0 }; struct npc_install_flow_rsp rsp = { 0 }; struct rvu_pfvf *pfvf; u8 lbkid; pfvf = rvu_get_pfvf(rvu, pcifunc); /* If the pcifunc is not initialized then nothing to do. * This same function will be called again via rvu_switch_update_rules * after pcifunc is initialized. */ if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags)) return 0; rvu_switch_enable_lbk_link(rvu, pcifunc, true); lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1; ether_addr_copy(req.packet.dmac, pfvf->mac_addr); eth_broadcast_addr((u8 *)&req.mask.dmac); req.hdr.pcifunc = 0; /* AF is requester */ req.vf = pcifunc; req.entry = entry; req.features = BIT_ULL(NPC_DMAC); req.intf = pfvf->nix_tx_intf; req.op = NIX_TX_ACTIONOP_UCAST_CHAN; req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN; req.set_cntr = 1; return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } static int rvu_switch_install_rules(struct rvu *rvu) { struct rvu_switch *rswitch = &rvu->rswitch; u16 start = rswitch->start_entry; struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc, entry = 0; int pf, vf, numvfs; int err; for (pf = 1; pf < hw->total_pfs; pf++) { if (!is_pf_cgxmapped(rvu, pf)) continue; pcifunc = pf << 10; /* rvu_get_nix_blkaddr sets up the corresponding NIX block * address and NIX RX and TX interfaces for a pcifunc. * Generally it is called during attach call of a pcifunc but it * is called here since we are pre-installing rules before * nixlfs are attached */ rvu_get_nix_blkaddr(rvu, pcifunc); /* MCAM RX rule for a PF/VF already exists as default unicast * rules installed by AF. Hence change the channel in those * rules to ignore channel so that packets with the required * DMAC received from LBK(by other PF/VFs in system) or from * external world (from wire) are accepted. 
*/ err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0); if (err) { dev_err(rvu->dev, "RX rule for PF%d failed(%d)\n", pf, err); return err; } err = rvu_switch_install_tx_rule(rvu, pcifunc, start + entry); if (err) { dev_err(rvu->dev, "TX rule for PF%d failed(%d)\n", pf, err); return err; } rswitch->entry2pcifunc[entry++] = pcifunc; rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL); for (vf = 0; vf < numvfs; vf++) { pcifunc = pf << 10 | ((vf + 1) & 0x3FF); rvu_get_nix_blkaddr(rvu, pcifunc); err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0); if (err) { dev_err(rvu->dev, "RX rule for PF%dVF%d failed(%d)\n", pf, vf, err); return err; } err = rvu_switch_install_tx_rule(rvu, pcifunc, start + entry); if (err) { dev_err(rvu->dev, "TX rule for PF%dVF%d failed(%d)\n", pf, vf, err); return err; } rswitch->entry2pcifunc[entry++] = pcifunc; } } return 0; } void rvu_switch_enable(struct rvu *rvu) { struct npc_mcam_alloc_entry_req alloc_req = { 0 }; struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 }; struct npc_delete_flow_req uninstall_req = { 0 }; struct npc_delete_flow_rsp uninstall_rsp = { 0 }; struct npc_mcam_free_entry_req free_req = { 0 }; struct rvu_switch *rswitch = &rvu->rswitch; struct msg_rsp rsp; int ret; alloc_req.contig = true; alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs; ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req, &alloc_rsp); if (ret) { dev_err(rvu->dev, "Unable to allocate MCAM entries\n"); goto exit; } if (alloc_rsp.count != alloc_req.count) { dev_err(rvu->dev, "Unable to allocate %d MCAM entries, got %d\n", alloc_req.count, alloc_rsp.count); goto free_entries; } rswitch->entry2pcifunc = kcalloc(alloc_req.count, sizeof(u16), GFP_KERNEL); if (!rswitch->entry2pcifunc) goto free_entries; rswitch->used_entries = alloc_rsp.count; rswitch->start_entry = alloc_rsp.entry; ret = rvu_switch_install_rules(rvu); if (ret) goto uninstall_rules; return; uninstall_rules: uninstall_req.start = rswitch->start_entry; uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1; rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp); kfree(rswitch->entry2pcifunc); free_entries: free_req.all = 1; rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp); exit: return; } void rvu_switch_disable(struct rvu *rvu) { struct npc_delete_flow_req uninstall_req = { 0 }; struct npc_delete_flow_rsp uninstall_rsp = { 0 }; struct npc_mcam_free_entry_req free_req = { 0 }; struct rvu_switch *rswitch = &rvu->rswitch; struct rvu_hwinfo *hw = rvu->hw; int pf, vf, numvfs; struct msg_rsp rsp; u16 pcifunc; int err; if (!rswitch->used_entries) return; for (pf = 1; pf < hw->total_pfs; pf++) { if (!is_pf_cgxmapped(rvu, pf)) continue; pcifunc = pf << 10; err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF); if (err) dev_err(rvu->dev, "Reverting RX rule for PF%d failed(%d)\n", pf, err); /* Disable LBK link */ rvu_switch_enable_lbk_link(rvu, pcifunc, false); rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL); for (vf = 0; vf < numvfs; vf++) { pcifunc = pf << 10 | ((vf + 1) & 0x3FF); err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF); if (err) dev_err(rvu->dev, "Reverting RX rule for PF%dVF%d failed(%d)\n", pf, vf, err); rvu_switch_enable_lbk_link(rvu, pcifunc, false); } } uninstall_req.start = rswitch->start_entry; uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1; free_req.all = 1; rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp); rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp); rswitch->used_entries = 0; kfree(rswitch->entry2pcifunc); } 
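/* Per-pcifunc rule refresh: rvu_switch_install_rx_rule()/..._tx_rule() above return early for PF/VFs whose NIXLF is not yet initialized, so once such a pcifunc comes up this hook is called again (see the comments in those helpers) to install its TX rule at the reserved MCAM slot and restore its channel-agnostic RX rule. */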
void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc) { struct rvu_switch *rswitch = &rvu->rswitch; u32 max = rswitch->used_entries; u16 entry; if (!rswitch->used_entries) return; for (entry = 0; entry < max; entry++) { if (rswitch->entry2pcifunc[entry] == pcifunc) break; } if (entry >= max) return; rvu_switch_install_tx_rule(rvu, pcifunc, rswitch->start_entry + entry); rvu_switch_install_rx_rule(rvu, pcifunc, 0x0); }
linux-master
drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
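rvu_switch.c above repeatedly open-codes the pcifunc layout as pf << 10 | ((vf + 1) & 0x3FF): the PF index sits above ten VF bits, and the VF field is one-based so that 0 means the PF itself. The helpers below restate that encoding for clarity; the names and macros are illustrative only and are not part of the driver, which keeps the expressions inline.

#include <linux/types.h>

/* Illustrative restatement of the encoding used in rvu_switch_install_rules()
 * and rvu_switch_disable() above; these are not driver definitions.
 */
#define EX_PF_SHIFT	10	/* PF index lives above the 10 VF bits */
#define EX_VF_MASK	0x3FF	/* low 10 bits: 0 = the PF, 1..N = VF0..VF(N-1) */

static inline u16 ex_make_pcifunc(int pf, int vf)
{
	/* pass vf = -1 to address the PF itself (VF field becomes 0) */
	return (pf << EX_PF_SHIFT) | ((vf + 1) & EX_VF_MASK);
}

static inline int ex_pcifunc_to_pf(u16 pcifunc)
{
	return pcifunc >> EX_PF_SHIFT;
}

static inline bool ex_pcifunc_is_vf(u16 pcifunc)
{
	return (pcifunc & EX_VF_MASK) != 0;
}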
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Admin Function driver * * Copyright (C) 2019 Marvell. * */ #ifdef CONFIG_DEBUG_FS #include <linux/fs.h> #include <linux/debugfs.h> #include <linux/module.h> #include <linux/pci.h> #include "rvu_struct.h" #include "rvu_reg.h" #include "rvu.h" #include "cgx.h" #include "lmac_common.h" #include "npc.h" #include "rvu_npc_hash.h" #include "mcs.h" #define DEBUGFS_DIR_NAME "octeontx2" enum { CGX_STAT0, CGX_STAT1, CGX_STAT2, CGX_STAT3, CGX_STAT4, CGX_STAT5, CGX_STAT6, CGX_STAT7, CGX_STAT8, CGX_STAT9, CGX_STAT10, CGX_STAT11, CGX_STAT12, CGX_STAT13, CGX_STAT14, CGX_STAT15, CGX_STAT16, CGX_STAT17, CGX_STAT18, }; /* NIX TX stats */ enum nix_stat_lf_tx { TX_UCAST = 0x0, TX_BCAST = 0x1, TX_MCAST = 0x2, TX_DROP = 0x3, TX_OCTS = 0x4, TX_STATS_ENUM_LAST, }; /* NIX RX stats */ enum nix_stat_lf_rx { RX_OCTS = 0x0, RX_UCAST = 0x1, RX_BCAST = 0x2, RX_MCAST = 0x3, RX_DROP = 0x4, RX_DROP_OCTS = 0x5, RX_FCS = 0x6, RX_ERR = 0x7, RX_DRP_BCAST = 0x8, RX_DRP_MCAST = 0x9, RX_DRP_L3BCAST = 0xa, RX_DRP_L3MCAST = 0xb, RX_STATS_ENUM_LAST, }; static char *cgx_rx_stats_fields[] = { [CGX_STAT0] = "Received packets", [CGX_STAT1] = "Octets of received packets", [CGX_STAT2] = "Received PAUSE packets", [CGX_STAT3] = "Received PAUSE and control packets", [CGX_STAT4] = "Filtered DMAC0 (NIX-bound) packets", [CGX_STAT5] = "Filtered DMAC0 (NIX-bound) octets", [CGX_STAT6] = "Packets dropped due to RX FIFO full", [CGX_STAT7] = "Octets dropped due to RX FIFO full", [CGX_STAT8] = "Error packets", [CGX_STAT9] = "Filtered DMAC1 (NCSI-bound) packets", [CGX_STAT10] = "Filtered DMAC1 (NCSI-bound) octets", [CGX_STAT11] = "NCSI-bound packets dropped", [CGX_STAT12] = "NCSI-bound octets dropped", }; static char *cgx_tx_stats_fields[] = { [CGX_STAT0] = "Packets dropped due to excessive collisions", [CGX_STAT1] = "Packets dropped due to excessive deferral", [CGX_STAT2] = "Multiple collisions before successful transmission", [CGX_STAT3] = "Single collisions before successful transmission", [CGX_STAT4] = "Total octets sent on the interface", [CGX_STAT5] = "Total frames sent on the interface", [CGX_STAT6] = "Packets sent with an octet count < 64", [CGX_STAT7] = "Packets sent with an octet count == 64", [CGX_STAT8] = "Packets sent with an octet count of 65-127", [CGX_STAT9] = "Packets sent with an octet count of 128-255", [CGX_STAT10] = "Packets sent with an octet count of 256-511", [CGX_STAT11] = "Packets sent with an octet count of 512-1023", [CGX_STAT12] = "Packets sent with an octet count of 1024-1518", [CGX_STAT13] = "Packets sent with an octet count of > 1518", [CGX_STAT14] = "Packets sent to a broadcast DMAC", [CGX_STAT15] = "Packets sent to the multicast DMAC", [CGX_STAT16] = "Transmit underflow and were truncated", [CGX_STAT17] = "Control/PAUSE packets sent", }; static char *rpm_rx_stats_fields[] = { "Octets of received packets", "Octets of received packets with out error", "Received packets with alignment errors", "Control/PAUSE packets received", "Packets received with Frame too long Errors", "Packets received with a1nrange length Errors", "Received packets", "Packets received with FrameCheckSequenceErrors", "Packets received with VLAN header", "Error packets", "Packets received with unicast DMAC", "Packets received with multicast DMAC", "Packets received with broadcast DMAC", "Dropped packets", "Total frames received on interface", "Packets received with an octet count < 64", "Packets received with an octet count == 64", "Packets received with an octet count of 65-127", "Packets received 
with an octet count of 128-255", "Packets received with an octet count of 256-511", "Packets received with an octet count of 512-1023", "Packets received with an octet count of 1024-1518", "Packets received with an octet count of > 1518", "Oversized Packets", "Jabber Packets", "Fragmented Packets", "CBFC(class based flow control) pause frames received for class 0", "CBFC pause frames received for class 1", "CBFC pause frames received for class 2", "CBFC pause frames received for class 3", "CBFC pause frames received for class 4", "CBFC pause frames received for class 5", "CBFC pause frames received for class 6", "CBFC pause frames received for class 7", "CBFC pause frames received for class 8", "CBFC pause frames received for class 9", "CBFC pause frames received for class 10", "CBFC pause frames received for class 11", "CBFC pause frames received for class 12", "CBFC pause frames received for class 13", "CBFC pause frames received for class 14", "CBFC pause frames received for class 15", "MAC control packets received", }; static char *rpm_tx_stats_fields[] = { "Total octets sent on the interface", "Total octets transmitted OK", "Control/Pause frames sent", "Total frames transmitted OK", "Total frames sent with VLAN header", "Error Packets", "Packets sent to unicast DMAC", "Packets sent to the multicast DMAC", "Packets sent to a broadcast DMAC", "Packets sent with an octet count == 64", "Packets sent with an octet count of 65-127", "Packets sent with an octet count of 128-255", "Packets sent with an octet count of 256-511", "Packets sent with an octet count of 512-1023", "Packets sent with an octet count of 1024-1518", "Packets sent with an octet count of > 1518", "CBFC(class based flow control) pause frames transmitted for class 0", "CBFC pause frames transmitted for class 1", "CBFC pause frames transmitted for class 2", "CBFC pause frames transmitted for class 3", "CBFC pause frames transmitted for class 4", "CBFC pause frames transmitted for class 5", "CBFC pause frames transmitted for class 6", "CBFC pause frames transmitted for class 7", "CBFC pause frames transmitted for class 8", "CBFC pause frames transmitted for class 9", "CBFC pause frames transmitted for class 10", "CBFC pause frames transmitted for class 11", "CBFC pause frames transmitted for class 12", "CBFC pause frames transmitted for class 13", "CBFC pause frames transmitted for class 14", "CBFC pause frames transmitted for class 15", "MAC control packets sent", "Total frames sent on the interface" }; enum cpt_eng_type { CPT_AE_TYPE = 1, CPT_SE_TYPE = 2, CPT_IE_TYPE = 3, }; #define rvu_dbg_NULL NULL #define rvu_dbg_open_NULL NULL #define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op) \ static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \ { \ return single_open(file, rvu_dbg_##read_op, inode->i_private); \ } \ static const struct file_operations rvu_dbg_##name##_fops = { \ .owner = THIS_MODULE, \ .open = rvu_dbg_open_##name, \ .read = seq_read, \ .write = rvu_dbg_##write_op, \ .llseek = seq_lseek, \ .release = single_release, \ } #define RVU_DEBUG_FOPS(name, read_op, write_op) \ static const struct file_operations rvu_dbg_##name##_fops = { \ .owner = THIS_MODULE, \ .open = simple_open, \ .read = rvu_dbg_##read_op, \ .write = rvu_dbg_##write_op \ } static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf); static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir) { struct mcs *mcs = filp->private; struct mcs_port_stats stats; int lmac; seq_puts(filp, "\n port 
stats\n"); mutex_lock(&mcs->stats_lock); for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) { mcs_get_port_stats(mcs, &stats, lmac, dir); seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt); seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt); if (dir == MCS_RX && mcs->hw->mcs_blks > 1) seq_printf(filp, "port%d: Preempt error: %lld\n", lmac, stats.preempt_err_cnt); if (dir == MCS_TX) seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac, stats.sectag_insert_err_cnt); } mutex_unlock(&mcs->stats_lock); return 0; } static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused) { return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX); } RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL); static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused) { return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX); } RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL); static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir) { struct mcs *mcs = filp->private; struct mcs_sa_stats stats; struct rsrc_bmap *map; int sa_id; if (dir == MCS_TX) { map = &mcs->tx.sa; mutex_lock(&mcs->stats_lock); for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) { seq_puts(filp, "\n TX SA stats\n"); mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX); seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id, stats.pkt_encrypt_cnt); seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id, stats.pkt_protected_cnt); } mutex_unlock(&mcs->stats_lock); return 0; } /* RX stats */ map = &mcs->rx.sa; mutex_lock(&mcs->stats_lock); for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) { seq_puts(filp, "\n RX SA stats\n"); mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX); seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt); seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt); seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt); seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt); seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt); } mutex_unlock(&mcs->stats_lock); return 0; } static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused) { return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX); } RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL); static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused) { return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX); } RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL); static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused) { struct mcs *mcs = filp->private; struct mcs_sc_stats stats; struct rsrc_bmap *map; int sc_id; map = &mcs->tx.sc; seq_puts(filp, "\n SC stats\n"); mutex_lock(&mcs->stats_lock); for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) { mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX); seq_printf(filp, "\n=======sc%d======\n\n", sc_id); seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt); seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt); if (mcs->hw->mcs_blks == 1) { seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id, stats.octet_encrypt_cnt); seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id, stats.octet_protected_cnt); } } mutex_unlock(&mcs->stats_lock); return 0; } RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, 
mcs_tx_sc_stats_display, NULL); static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused) { struct mcs *mcs = filp->private; struct mcs_sc_stats stats; struct rsrc_bmap *map; int sc_id; map = &mcs->rx.sc; seq_puts(filp, "\n SC stats\n"); mutex_lock(&mcs->stats_lock); for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) { mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX); seq_printf(filp, "\n=======sc%d======\n\n", sc_id); seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt); seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt); seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt); seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt); seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt); if (mcs->hw->mcs_blks > 1) { seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt); seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt); } if (mcs->hw->mcs_blks == 1) { seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id, stats.octet_decrypt_cnt); seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id, stats.octet_validate_cnt); } } mutex_unlock(&mcs->stats_lock); return 0; } RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL); static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir) { struct mcs *mcs = filp->private; struct mcs_flowid_stats stats; struct rsrc_bmap *map; int flow_id; seq_puts(filp, "\n Flowid stats\n"); if (dir == MCS_RX) map = &mcs->rx.flow_ids; else map = &mcs->tx.flow_ids; mutex_lock(&mcs->stats_lock); for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) { mcs_get_flowid_stats(mcs, &stats, flow_id, dir); seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt); } mutex_unlock(&mcs->stats_lock); return 0; } static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused) { return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX); } RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL); static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused) { return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX); } RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL); static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused) { struct mcs *mcs = filp->private; struct mcs_secy_stats stats; struct rsrc_bmap *map; int secy_id; map = &mcs->tx.secy; seq_puts(filp, "\n MCS TX secy stats\n"); mutex_lock(&mcs->stats_lock); for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) { mcs_get_tx_secy_stats(mcs, &stats, secy_id); seq_printf(filp, "\n=======Secy%d======\n\n", secy_id); seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id, stats.ctl_pkt_bcast_cnt); seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id, stats.ctl_pkt_mcast_cnt); seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id, stats.ctl_pkt_ucast_cnt); seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt); seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id, stats.unctl_pkt_bcast_cnt); seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id, stats.unctl_pkt_mcast_cnt); seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id, stats.unctl_pkt_ucast_cnt); seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt); seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id, stats.octet_encrypted_cnt); 
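	/* Remaining per-SecY TX counters printed below: protected octets, and packets hitting the no-active-SA, too-long and untagged conditions. */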
seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id, stats.octet_protected_cnt); seq_printf(filp, "secy%d: Pkts on active sa: %lld\n", secy_id, stats.pkt_noactivesa_cnt); seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt); seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt); } mutex_unlock(&mcs->stats_lock); return 0; } RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL); static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused) { struct mcs *mcs = filp->private; struct mcs_secy_stats stats; struct rsrc_bmap *map; int secy_id; map = &mcs->rx.secy; seq_puts(filp, "\n MCS secy stats\n"); mutex_lock(&mcs->stats_lock); for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) { mcs_get_rx_secy_stats(mcs, &stats, secy_id); seq_printf(filp, "\n=======Secy%d======\n\n", secy_id); seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id, stats.ctl_pkt_bcast_cnt); seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id, stats.ctl_pkt_mcast_cnt); seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id, stats.ctl_pkt_ucast_cnt); seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt); seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id, stats.unctl_pkt_bcast_cnt); seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id, stats.unctl_pkt_mcast_cnt); seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id, stats.unctl_pkt_ucast_cnt); seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt); seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id, stats.octet_decrypted_cnt); seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id, stats.octet_validated_cnt); seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id, stats.pkt_port_disabled_cnt); seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt); seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id, stats.pkt_nosa_cnt); seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id, stats.pkt_nosaerror_cnt); seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id, stats.pkt_tagged_ctl_cnt); seq_printf(filp, "secy%d: Untaged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt); seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt); if (mcs->hw->mcs_blks > 1) seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id, stats.pkt_notag_cnt); } mutex_unlock(&mcs->stats_lock); return 0; } RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL); static void rvu_dbg_mcs_init(struct rvu *rvu) { struct mcs *mcs; char dname[10]; int i; if (!rvu->mcs_blk_cnt) return; rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root); for (i = 0; i < rvu->mcs_blk_cnt; i++) { mcs = mcs_get_pdata(i); sprintf(dname, "mcs%d", i); rvu->rvu_dbg.mcs = debugfs_create_dir(dname, rvu->rvu_dbg.mcs_root); rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs); debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs, &rvu_dbg_mcs_rx_flowid_stats_fops); debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs, &rvu_dbg_mcs_rx_secy_stats_fops); debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs, &rvu_dbg_mcs_rx_sc_stats_fops); debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs, &rvu_dbg_mcs_rx_sa_stats_fops); debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs, &rvu_dbg_mcs_rx_port_stats_fops); rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", 
rvu->rvu_dbg.mcs); debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs, &rvu_dbg_mcs_tx_flowid_stats_fops); debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs, &rvu_dbg_mcs_tx_secy_stats_fops); debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs, &rvu_dbg_mcs_tx_sc_stats_fops); debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs, &rvu_dbg_mcs_tx_sa_stats_fops); debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs, &rvu_dbg_mcs_tx_port_stats_fops); } } #define LMT_MAPTBL_ENTRY_SIZE 16 /* Dump LMTST map table */ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { struct rvu *rvu = filp->private_data; u64 lmt_addr, val, tbl_base; int pf, vf, num_vfs, hw_vfs; void __iomem *lmt_map_base; int buf_size = 10240; size_t off = 0; int index = 0; char *buf; int ret; /* don't allow partial reads */ if (*ppos != 0) return 0; buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) return -ENOMEM; tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); lmt_map_base = ioremap_wc(tbl_base, 128 * 1024); if (!lmt_map_base) { dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); kfree(buf); return false; } off += scnprintf(&buf[off], buf_size - 1 - off, "\n\t\t\t\t\tLmtst Map Table Entries"); off += scnprintf(&buf[off], buf_size - 1 - off, "\n\t\t\t\t\t======================="); off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t"); off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t"); off += scnprintf(&buf[off], buf_size - 1 - off, "Lmtline Base (word 0)\t\t"); off += scnprintf(&buf[off], buf_size - 1 - off, "Lmt Map Entry (word 1)"); off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); for (pf = 0; pf < rvu->hw->total_pfs; pf++) { off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t", pf); index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE; off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t", (tbl_base + index)); lmt_addr = readq(lmt_map_base + index); off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\t\t", lmt_addr); index += 8; val = readq(lmt_map_base + index); off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n", val); /* Reading num of VFs per PF */ rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs); for (vf = 0; vf < num_vfs; vf++) { index = (pf * rvu->hw->total_vfs * 16) + ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE); off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d:VF%d \t\t", pf, vf); off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t", (tbl_base + index)); lmt_addr = readq(lmt_map_base + index); off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\t\t", lmt_addr); index += 8; val = readq(lmt_map_base + index); off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n", val); } } off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); ret = min(off, count); if (copy_to_user(buffer, buf, ret)) ret = -EFAULT; kfree(buf); iounmap(lmt_map_base); if (ret < 0) return ret; *ppos = ret; return ret; } RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL); static void get_lf_str_list(struct rvu_block block, int pcifunc, char *lfs) { int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max; for_each_set_bit(lf, block.lf.bmap, block.lf.max) { if (lf >= block.lf.max) break; if (block.fn_map[lf] != pcifunc) continue; if (lf == prev_lf + 1) { prev_lf = lf; seq = 1; continue; } if (seq) len += sprintf(lfs + len, "-%d,%d", prev_lf, lf); else len += (len ? 
sprintf(lfs + len, ",%d", lf) : sprintf(lfs + len, "%d", lf)); prev_lf = lf; seq = 0; } if (seq) len += sprintf(lfs + len, "-%d", prev_lf); lfs[len] = '\0'; } static int get_max_column_width(struct rvu *rvu) { int index, pf, vf, lf_str_size = 12, buf_size = 256; struct rvu_block block; u16 pcifunc; char *buf; buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) return -ENOMEM; for (pf = 0; pf < rvu->hw->total_pfs; pf++) { for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { pcifunc = pf << 10 | vf; if (!pcifunc) continue; for (index = 0; index < BLK_COUNT; index++) { block = rvu->hw->block[index]; if (!strlen(block.name)) continue; get_lf_str_list(block, pcifunc, buf); if (lf_str_size <= strlen(buf)) lf_str_size = strlen(buf) + 1; } } } kfree(buf); return lf_str_size; } /* Dumps current provisioning status of all RVU block LFs */ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { int index, off = 0, flag = 0, len = 0, i = 0; struct rvu *rvu = filp->private_data; int bytes_not_copied = 0; struct rvu_block block; int pf, vf, pcifunc; int buf_size = 2048; int lf_str_size; char *lfs; char *buf; /* don't allow partial reads */ if (*ppos != 0) return 0; buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) return -ENOMEM; /* Get the maximum width of a column */ lf_str_size = get_max_column_width(rvu); lfs = kzalloc(lf_str_size, GFP_KERNEL); if (!lfs) { kfree(buf); return -ENOMEM; } off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size, "pcifunc"); for (index = 0; index < BLK_COUNT; index++) if (strlen(rvu->hw->block[index].name)) { off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size, rvu->hw->block[index].name); } off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); bytes_not_copied = copy_to_user(buffer + (i * off), buf, off); if (bytes_not_copied) goto out; i++; *ppos += off; for (pf = 0; pf < rvu->hw->total_pfs; pf++) { for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { off = 0; flag = 0; pcifunc = pf << 10 | vf; if (!pcifunc) continue; if (vf) { sprintf(lfs, "PF%d:VF%d", pf, vf - 1); off = scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size, lfs); } else { sprintf(lfs, "PF%d", pf); off = scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size, lfs); } for (index = 0; index < BLK_COUNT; index++) { block = rvu->hw->block[index]; if (!strlen(block.name)) continue; len = 0; lfs[len] = '\0'; get_lf_str_list(block, pcifunc, lfs); if (strlen(lfs)) flag = 1; off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size, lfs); } if (flag) { off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); bytes_not_copied = copy_to_user(buffer + (i * off), buf, off); if (bytes_not_copied) goto out; i++; *ppos += off; } } } out: kfree(lfs); kfree(buf); if (bytes_not_copied) return -EFAULT; return *ppos; } RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL); static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) { struct rvu *rvu = filp->private; struct pci_dev *pdev = NULL; struct mac_ops *mac_ops; char cgx[10], lmac[10]; struct rvu_pfvf *pfvf; int pf, domain, blkid; u8 cgx_id, lmac_id; u16 pcifunc; domain = 2; mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); /* There can be no CGX devices at all */ if (!mac_ops) return 0; seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n", mac_ops->name); for (pf = 0; pf < rvu->hw->total_pfs; pf++) { if (!is_pf_cgxmapped(rvu, pf)) continue; pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0); if (!pdev) continue; cgx[0] = 0; lmac[0] = 0; pcifunc = 
pf << 10; pfvf = rvu_get_pfvf(rvu, pcifunc); if (pfvf->nix_blkaddr == BLKADDR_NIX0) blkid = 0; else blkid = 1; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); sprintf(cgx, "%s%d", mac_ops->name, cgx_id); sprintf(lmac, "LMAC%d", lmac_id); seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n", dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac); pci_dev_put(pdev); } return 0; } RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL); static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf, u16 *pcifunc) { struct rvu_block *block; struct rvu_hwinfo *hw; hw = rvu->hw; block = &hw->block[blkaddr]; if (lf < 0 || lf >= block->lf.max) { dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n", block->lf.max - 1); return false; } *pcifunc = block->fn_map[lf]; if (!*pcifunc) { dev_warn(rvu->dev, "This LF is not attached to any RVU PFFUNC\n"); return false; } return true; } static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf) { char *buf; buf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buf) return; if (!pfvf->aura_ctx) { seq_puts(m, "Aura context is not initialized\n"); } else { bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap, pfvf->aura_ctx->qsize); seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize); seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf); } if (!pfvf->pool_ctx) { seq_puts(m, "Pool context is not initialized\n"); } else { bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap, pfvf->pool_ctx->qsize); seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize); seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf); } kfree(buf); } /* The 'qsize' entry dumps current Aura/Pool context Qsize * and each context's current enable/disable status in a bitmap. */ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused, int blktype) { void (*print_qsize)(struct seq_file *filp, struct rvu_pfvf *pfvf) = NULL; struct dentry *current_dir; struct rvu_pfvf *pfvf; struct rvu *rvu; int qsize_id; u16 pcifunc; int blkaddr; rvu = filp->private; switch (blktype) { case BLKTYPE_NPA: qsize_id = rvu->rvu_dbg.npa_qsize_id; print_qsize = print_npa_qsize; break; case BLKTYPE_NIX: qsize_id = rvu->rvu_dbg.nix_qsize_id; print_qsize = print_nix_qsize; break; default: return -EINVAL; } if (blktype == BLKTYPE_NPA) { blkaddr = BLKADDR_NPA; } else { current_dir = filp->file->f_path.dentry->d_parent; blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ? BLKADDR_NIX1 : BLKADDR_NIX0); } if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc)) return -EINVAL; pfvf = rvu_get_pfvf(rvu, pcifunc); print_qsize(filp, pfvf); return 0; } static ssize_t rvu_dbg_qsize_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos, int blktype) { char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix"; struct seq_file *seqfile = filp->private_data; char *cmd_buf, *cmd_buf_tmp, *subtoken; struct rvu *rvu = seqfile->private; struct dentry *current_dir; int blkaddr; u16 pcifunc; int ret, lf; cmd_buf = memdup_user(buffer, count + 1); if (IS_ERR(cmd_buf)) return -ENOMEM; cmd_buf[count] = '\0'; cmd_buf_tmp = strchr(cmd_buf, '\n'); if (cmd_buf_tmp) { *cmd_buf_tmp = '\0'; count = cmd_buf_tmp - cmd_buf + 1; } cmd_buf_tmp = cmd_buf; subtoken = strsep(&cmd_buf, " "); ret = subtoken ? 
kstrtoint(subtoken, 10, &lf) : -EINVAL; if (cmd_buf) ret = -EINVAL; if (ret < 0 || !strncmp(subtoken, "help", 4)) { dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string); goto qsize_write_done; } if (blktype == BLKTYPE_NPA) { blkaddr = BLKADDR_NPA; } else { current_dir = filp->f_path.dentry->d_parent; blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ? BLKADDR_NIX1 : BLKADDR_NIX0); } if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) { ret = -EINVAL; goto qsize_write_done; } if (blktype == BLKTYPE_NPA) rvu->rvu_dbg.npa_qsize_id = lf; else rvu->rvu_dbg.nix_qsize_id = lf; qsize_write_done: kfree(cmd_buf_tmp); return ret ? ret : count; } static ssize_t rvu_dbg_npa_qsize_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos) { return rvu_dbg_qsize_write(filp, buffer, count, ppos, BLKTYPE_NPA); } static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused) { return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA); } RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write); /* Dumps given NPA Aura's context */ static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) { struct npa_aura_s *aura = &rsp->aura; struct rvu *rvu = m->private; seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr); seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n", aura->ena, aura->pool_caching); seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n", aura->pool_way_mask, aura->avg_con); seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n", aura->pool_drop_ena, aura->aura_drop_ena); seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n", aura->bp_ena, aura->aura_drop); seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n", aura->shift, aura->avg_level); seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n", (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid); seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n", (u64)aura->limit, aura->bp, aura->fc_ena); if (!is_rvu_otx2(rvu)) seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be); seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n", aura->fc_up_crossing, aura->fc_stype); seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits); seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr); seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n", aura->pool_drop, aura->update_time); seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n", aura->err_int, aura->err_int_ena); seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n", aura->thresh_int, aura->thresh_int_ena); seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n", aura->thresh_up, aura->thresh_qint_idx); seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx); seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh); if (!is_rvu_otx2(rvu)) seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst); } /* Dumps given NPA Pool's context */ static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) { struct npa_pool_s *pool = &rsp->pool; struct rvu *rvu = m->private; seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base); seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n", pool->ena, pool->nat_align); seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n", pool->stack_caching, pool->stack_way_mask); seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n", pool->buf_offset, pool->buf_size); seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n", pool->stack_max_pages, pool->stack_pages); 
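	/* Words W3-W8 printed below: pool operation counter, stack/flow-control configuration, buffer pointer range (ptr_start/ptr_end) and interrupt/threshold state. */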
seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc); seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n", pool->stack_offset, pool->shift, pool->avg_level); seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n", pool->avg_con, pool->fc_ena, pool->fc_stype); seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n", pool->fc_hyst_bits, pool->fc_up_crossing); if (!is_rvu_otx2(rvu)) seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be); seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time); seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr); seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start); seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end); seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n", pool->err_int, pool->err_int_ena); seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int); seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n", pool->thresh_int_ena, pool->thresh_up); seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n", pool->thresh_qint_idx, pool->err_qint_idx); if (!is_rvu_otx2(rvu)) seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst); } /* Reads aura/pool's ctx from admin queue */ static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype) { void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp); struct npa_aq_enq_req aq_req; struct npa_aq_enq_rsp rsp; struct rvu_pfvf *pfvf; int aura, rc, max_id; int npalf, id, all; struct rvu *rvu; u16 pcifunc; rvu = m->private; switch (ctype) { case NPA_AQ_CTYPE_AURA: npalf = rvu->rvu_dbg.npa_aura_ctx.lf; id = rvu->rvu_dbg.npa_aura_ctx.id; all = rvu->rvu_dbg.npa_aura_ctx.all; break; case NPA_AQ_CTYPE_POOL: npalf = rvu->rvu_dbg.npa_pool_ctx.lf; id = rvu->rvu_dbg.npa_pool_ctx.id; all = rvu->rvu_dbg.npa_pool_ctx.all; break; default: return -EINVAL; } if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc)) return -EINVAL; pfvf = rvu_get_pfvf(rvu, pcifunc); if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) { seq_puts(m, "Aura context is not initialized\n"); return -EINVAL; } else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) { seq_puts(m, "Pool context is not initialized\n"); return -EINVAL; } memset(&aq_req, 0, sizeof(struct npa_aq_enq_req)); aq_req.hdr.pcifunc = pcifunc; aq_req.ctype = ctype; aq_req.op = NPA_AQ_INSTOP_READ; if (ctype == NPA_AQ_CTYPE_AURA) { max_id = pfvf->aura_ctx->qsize; print_npa_ctx = print_npa_aura_ctx; } else { max_id = pfvf->pool_ctx->qsize; print_npa_ctx = print_npa_pool_ctx; } if (id < 0 || id >= max_id) { seq_printf(m, "Invalid %s, valid range is 0-%d\n", (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool", max_id - 1); return -EINVAL; } if (all) id = 0; else max_id = id + 1; for (aura = id; aura < max_id; aura++) { aq_req.aura_id = aura; /* Skip if queue is uninitialized */ if (ctype == NPA_AQ_CTYPE_POOL && !test_bit(aura, pfvf->pool_bmap)) continue; seq_printf(m, "======%s : %d=======\n", (ctype == NPA_AQ_CTYPE_AURA) ? 
"AURA" : "POOL", aq_req.aura_id); rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp); if (rc) { seq_puts(m, "Failed to read context\n"); return -EINVAL; } print_npa_ctx(m, &rsp); } return 0; } static int write_npa_ctx(struct rvu *rvu, bool all, int npalf, int id, int ctype) { struct rvu_pfvf *pfvf; int max_id = 0; u16 pcifunc; if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc)) return -EINVAL; pfvf = rvu_get_pfvf(rvu, pcifunc); if (ctype == NPA_AQ_CTYPE_AURA) { if (!pfvf->aura_ctx) { dev_warn(rvu->dev, "Aura context is not initialized\n"); return -EINVAL; } max_id = pfvf->aura_ctx->qsize; } else if (ctype == NPA_AQ_CTYPE_POOL) { if (!pfvf->pool_ctx) { dev_warn(rvu->dev, "Pool context is not initialized\n"); return -EINVAL; } max_id = pfvf->pool_ctx->qsize; } if (id < 0 || id >= max_id) { dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n", (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool", max_id - 1); return -EINVAL; } switch (ctype) { case NPA_AQ_CTYPE_AURA: rvu->rvu_dbg.npa_aura_ctx.lf = npalf; rvu->rvu_dbg.npa_aura_ctx.id = id; rvu->rvu_dbg.npa_aura_ctx.all = all; break; case NPA_AQ_CTYPE_POOL: rvu->rvu_dbg.npa_pool_ctx.lf = npalf; rvu->rvu_dbg.npa_pool_ctx.id = id; rvu->rvu_dbg.npa_pool_ctx.all = all; break; default: return -EINVAL; } return 0; } static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count, const char __user *buffer, int *npalf, int *id, bool *all) { int bytes_not_copied; char *cmd_buf_tmp; char *subtoken; int ret; bytes_not_copied = copy_from_user(cmd_buf, buffer, *count); if (bytes_not_copied) return -EFAULT; cmd_buf[*count] = '\0'; cmd_buf_tmp = strchr(cmd_buf, '\n'); if (cmd_buf_tmp) { *cmd_buf_tmp = '\0'; *count = cmd_buf_tmp - cmd_buf + 1; } subtoken = strsep(&cmd_buf, " "); ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL; if (ret < 0) return ret; subtoken = strsep(&cmd_buf, " "); if (subtoken && strcmp(subtoken, "all") == 0) { *all = true; } else { ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL; if (ret < 0) return ret; } if (cmd_buf) return -EINVAL; return ret; } static ssize_t rvu_dbg_npa_ctx_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos, int ctype) { char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool"; struct seq_file *seqfp = filp->private_data; struct rvu *rvu = seqfp->private; int npalf, id = 0, ret; bool all = false; if ((*ppos != 0) || !count) return -EINVAL; cmd_buf = kzalloc(count + 1, GFP_KERNEL); if (!cmd_buf) return count; ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer, &npalf, &id, &all); if (ret < 0) { dev_info(rvu->dev, "Usage: echo <npalf> [%s number/all] > %s_ctx\n", ctype_string, ctype_string); goto done; } else { ret = write_npa_ctx(rvu, all, npalf, id, ctype); } done: kfree(cmd_buf); return ret ? 
ret : count; } static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos) { return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos, NPA_AQ_CTYPE_AURA); } static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused) { return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA); } RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write); static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos) { return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos, NPA_AQ_CTYPE_POOL); } static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused) { return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL); } RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write); static void ndc_cache_stats(struct seq_file *s, int blk_addr, int ctype, int transaction) { u64 req, out_req, lat, cant_alloc; struct nix_hw *nix_hw; struct rvu *rvu; int port; if (blk_addr == BLKADDR_NDC_NPA0) { rvu = s->private; } else { nix_hw = s->private; rvu = nix_hw->rvu; } for (port = 0; port < NDC_MAX_PORT; port++) { req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC (port, ctype, transaction)); lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC (port, ctype, transaction)); out_req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_OSTDN_PC (port, ctype, transaction)); cant_alloc = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_CANT_ALLOC_PC (port, transaction)); seq_printf(s, "\nPort:%d\n", port); seq_printf(s, "\tTotal Requests:\t\t%lld\n", req); seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat); seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req); seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req); seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc); } } static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr) { seq_puts(s, "\n***** CACHE mode read stats *****\n"); ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS); seq_puts(s, "\n***** CACHE mode write stats *****\n"); ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS); seq_puts(s, "\n***** BY-PASS mode read stats *****\n"); ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS); seq_puts(s, "\n***** BY-PASS mode write stats *****\n"); ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS); return 0; } static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused) { return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0); } RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL); static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr) { struct nix_hw *nix_hw; struct rvu *rvu; int bank, max_bank; u64 ndc_af_const; if (blk_addr == BLKADDR_NDC_NPA0) { rvu = s->private; } else { nix_hw = s->private; rvu = nix_hw->rvu; } ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST); max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const); for (bank = 0; bank < max_bank; bank++) { seq_printf(s, "BANK:%d\n", bank); seq_printf(s, "\tHits:\t%lld\n", (u64)rvu_read64(rvu, blk_addr, NDC_AF_BANKX_HIT_PC(bank))); seq_printf(s, "\tMiss:\t%lld\n", (u64)rvu_read64(rvu, blk_addr, NDC_AF_BANKX_MISS_PC(bank))); } return 0; } static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused) { struct nix_hw *nix_hw = filp->private; int blkaddr = 0; int ndc_idx = 0; blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ? BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX); ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? 
NIX1_RX : NIX0_RX); return ndc_blk_cache_stats(filp, ndc_idx, blkaddr); } RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL); static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused) { struct nix_hw *nix_hw = filp->private; int blkaddr = 0; int ndc_idx = 0; blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ? BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX); ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX); return ndc_blk_cache_stats(filp, ndc_idx, blkaddr); } RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL); static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp, void *unused) { return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0); } RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL); static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp, void *unused) { struct nix_hw *nix_hw = filp->private; int ndc_idx = NPA0_U; int blkaddr = 0; blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ? BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX); return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr); } RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL); static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp, void *unused) { struct nix_hw *nix_hw = filp->private; int ndc_idx = NPA0_U; int blkaddr = 0; blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ? BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX); return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr); } RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL); static void print_nix_cn10k_sq_ctx(struct seq_file *m, struct nix_cn10k_sq_ctx_s *sq_ctx) { seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n", sq_ctx->ena, sq_ctx->qint_idx); seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n", sq_ctx->substream, sq_ctx->sdp_mcast); seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n", sq_ctx->cq, sq_ctx->sqe_way_mask); seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n", sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff); seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n", sq_ctx->sso_ena, sq_ctx->smq_rr_weight); seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n", sq_ctx->default_chan, sq_ctx->sqb_count); seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb); seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub); seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n", sq_ctx->sqb_aura, sq_ctx->sq_int); seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n", sq_ctx->sq_int_ena, sq_ctx->sqe_stype); seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n", sq_ctx->max_sqe_size, sq_ctx->cq_limit); seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n", sq_ctx->mnq_dis, sq_ctx->lmt_dis); seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n", sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum); seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n", sq_ctx->tail_offset, sq_ctx->smenq_offset); seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n", sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld); seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n", sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend); seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb); seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb); seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb); seq_printf(m, "W7: smenq_next_sqb 
\t\t%llx\n\n", sq_ctx->smenq_next_sqb); seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb); seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total); seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n", sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb); seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n", sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena); seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n", sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena); seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n", (u64)sq_ctx->scm_lso_rem); seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs); seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts); seq_printf(m, "W14: dropped_octs \t\t%llu\n\n", (u64)sq_ctx->dropped_octs); seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n", (u64)sq_ctx->dropped_pkts); } /* Dumps given nix_sq's context */ static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) { struct nix_sq_ctx_s *sq_ctx = &rsp->sq; struct nix_hw *nix_hw = m->private; struct rvu *rvu = nix_hw->rvu; if (!is_rvu_otx2(rvu)) { print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx); return; } seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n", sq_ctx->sqe_way_mask, sq_ctx->cq); seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n", sq_ctx->sdp_mcast, sq_ctx->substream); seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n", sq_ctx->qint_idx, sq_ctx->ena); seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n", sq_ctx->sqb_count, sq_ctx->default_chan); seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n", sq_ctx->smq_rr_quantum, sq_ctx->sso_ena); seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n", sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq); seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n", sq_ctx->sqe_stype, sq_ctx->sq_int_ena); seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n", sq_ctx->sq_int, sq_ctx->sqb_aura); seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count); seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n", sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend); seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n", sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset); seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n", sq_ctx->smenq_offset, sq_ctx->tail_offset); seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n", sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq); seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n", sq_ctx->mnq_dis, sq_ctx->lmt_dis); seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n", sq_ctx->cq_limit, sq_ctx->max_sqe_size); seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb); seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb); seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb); seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n", sq_ctx->smenq_next_sqb); seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb); seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n", sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena); seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n", sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps); seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n", sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1); seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total); 
seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n", (u64)sq_ctx->scm_lso_rem); seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs); seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts); seq_printf(m, "W14: dropped_octs \t\t%llu\n\n", (u64)sq_ctx->dropped_octs); seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n", (u64)sq_ctx->dropped_pkts); } static void print_nix_cn10k_rq_ctx(struct seq_file *m, struct nix_cn10k_rq_ctx_s *rq_ctx) { seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n", rq_ctx->ena, rq_ctx->sso_ena); seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n", rq_ctx->ipsech_ena, rq_ctx->ena_wqwd); seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n", rq_ctx->cq, rq_ctx->lenerr_dis); seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n", rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis); seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n", rq_ctx->len_il4_dis, rq_ctx->len_il3_dis); seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n", rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis); seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura); seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n", rq_ctx->spb_aura, rq_ctx->lpb_aura); seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura); seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n", rq_ctx->sso_grp, rq_ctx->sso_tt); seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n", rq_ctx->pb_caching, rq_ctx->wqe_caching); seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n", rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena); seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n", rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing); seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n", rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena); seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id); seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena); seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1); seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n", rq_ctx->wqe_skip, rq_ctx->spb_ena); seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n", rq_ctx->lpb_sizem1, rq_ctx->first_skip); seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n", rq_ctx->later_skip, rq_ctx->xqe_imm_size); seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n", rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split); seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n", rq_ctx->xqe_drop, rq_ctx->xqe_pass); seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n", rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass); seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n", rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass); seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n", rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop); seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n", rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop); seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n", rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass); seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n", rq_ctx->rq_int, rq_ctx->rq_int_ena); seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx); seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n", rq_ctx->ltag, rq_ctx->good_utag); seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n", rq_ctx->bad_utag, rq_ctx->flow_tagw); seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: 
vwqe_ena \t\t\t%d\n", rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena); seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n", rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp); seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip); seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs); seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts); seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs); seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts); seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts); } /* Dumps given nix_rq's context */ static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) { struct nix_rq_ctx_s *rq_ctx = &rsp->rq; struct nix_hw *nix_hw = m->private; struct rvu *rvu = nix_hw->rvu; if (!is_rvu_otx2(rvu)) { print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx); return; } seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n", rq_ctx->wqe_aura, rq_ctx->substream); seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n", rq_ctx->cq, rq_ctx->ena_wqwd); seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n", rq_ctx->ipsech_ena, rq_ctx->sso_ena); seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena); seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n", rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena); seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n", rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching); seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n", rq_ctx->pb_caching, rq_ctx->sso_tt); seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n", rq_ctx->sso_grp, rq_ctx->lpb_aura); seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura); seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n", rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy); seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n", rq_ctx->xqe_imm_size, rq_ctx->later_skip); seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n", rq_ctx->first_skip, rq_ctx->lpb_sizem1); seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n", rq_ctx->spb_ena, rq_ctx->wqe_skip); seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1); seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n", rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop); seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n", rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop); seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n", rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop); seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n", rq_ctx->xqe_pass, rq_ctx->xqe_drop); seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n", rq_ctx->qint_idx, rq_ctx->rq_int_ena); seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n", rq_ctx->rq_int, rq_ctx->lpb_pool_pass); seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n", rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass); seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop); seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n", rq_ctx->flow_tagw, rq_ctx->bad_utag); seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n", rq_ctx->good_utag, rq_ctx->ltag); seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs); seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts); seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs); seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts); seq_printf(m, 
"W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts); } /* Dumps given nix_cq's context */ static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) { struct nix_cq_ctx_s *cq_ctx = &rsp->cq; seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base); seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr); seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n", cq_ctx->avg_con, cq_ctx->cint_idx); seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n", cq_ctx->cq_err, cq_ctx->qint_idx); seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n", cq_ctx->bpid, cq_ctx->bp_ena); seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n", cq_ctx->update_time, cq_ctx->avg_level); seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n", cq_ctx->head, cq_ctx->tail); seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n", cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int); seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n", cq_ctx->qsize, cq_ctx->caching); seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n", cq_ctx->substream, cq_ctx->ena); seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n", cq_ctx->drop_ena, cq_ctx->drop); seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp); } static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp, void *unused, int ctype) { void (*print_nix_ctx)(struct seq_file *filp, struct nix_aq_enq_rsp *rsp) = NULL; struct nix_hw *nix_hw = filp->private; struct rvu *rvu = nix_hw->rvu; struct nix_aq_enq_req aq_req; struct nix_aq_enq_rsp rsp; char *ctype_string = NULL; int qidx, rc, max_id = 0; struct rvu_pfvf *pfvf; int nixlf, id, all; u16 pcifunc; switch (ctype) { case NIX_AQ_CTYPE_CQ: nixlf = rvu->rvu_dbg.nix_cq_ctx.lf; id = rvu->rvu_dbg.nix_cq_ctx.id; all = rvu->rvu_dbg.nix_cq_ctx.all; break; case NIX_AQ_CTYPE_SQ: nixlf = rvu->rvu_dbg.nix_sq_ctx.lf; id = rvu->rvu_dbg.nix_sq_ctx.id; all = rvu->rvu_dbg.nix_sq_ctx.all; break; case NIX_AQ_CTYPE_RQ: nixlf = rvu->rvu_dbg.nix_rq_ctx.lf; id = rvu->rvu_dbg.nix_rq_ctx.id; all = rvu->rvu_dbg.nix_rq_ctx.all; break; default: return -EINVAL; } if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc)) return -EINVAL; pfvf = rvu_get_pfvf(rvu, pcifunc); if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) { seq_puts(filp, "SQ context is not initialized\n"); return -EINVAL; } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) { seq_puts(filp, "RQ context is not initialized\n"); return -EINVAL; } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) { seq_puts(filp, "CQ context is not initialized\n"); return -EINVAL; } if (ctype == NIX_AQ_CTYPE_SQ) { max_id = pfvf->sq_ctx->qsize; ctype_string = "sq"; print_nix_ctx = print_nix_sq_ctx; } else if (ctype == NIX_AQ_CTYPE_RQ) { max_id = pfvf->rq_ctx->qsize; ctype_string = "rq"; print_nix_ctx = print_nix_rq_ctx; } else if (ctype == NIX_AQ_CTYPE_CQ) { max_id = pfvf->cq_ctx->qsize; ctype_string = "cq"; print_nix_ctx = print_nix_cq_ctx; } memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); aq_req.hdr.pcifunc = pcifunc; aq_req.ctype = ctype; aq_req.op = NIX_AQ_INSTOP_READ; if (all) id = 0; else max_id = id + 1; for (qidx = id; qidx < max_id; qidx++) { aq_req.qidx = qidx; seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n", ctype_string, nixlf, aq_req.qidx); rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp); if (rc) { seq_puts(filp, "Failed to read the context\n"); return -EINVAL; } print_nix_ctx(filp, &rsp); } return 0; } static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf, int id, int ctype, char 
*ctype_string, struct seq_file *m) { struct nix_hw *nix_hw = m->private; struct rvu_pfvf *pfvf; int max_id = 0; u16 pcifunc; if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc)) return -EINVAL; pfvf = rvu_get_pfvf(rvu, pcifunc); if (ctype == NIX_AQ_CTYPE_SQ) { if (!pfvf->sq_ctx) { dev_warn(rvu->dev, "SQ context is not initialized\n"); return -EINVAL; } max_id = pfvf->sq_ctx->qsize; } else if (ctype == NIX_AQ_CTYPE_RQ) { if (!pfvf->rq_ctx) { dev_warn(rvu->dev, "RQ context is not initialized\n"); return -EINVAL; } max_id = pfvf->rq_ctx->qsize; } else if (ctype == NIX_AQ_CTYPE_CQ) { if (!pfvf->cq_ctx) { dev_warn(rvu->dev, "CQ context is not initialized\n"); return -EINVAL; } max_id = pfvf->cq_ctx->qsize; } if (id < 0 || id >= max_id) { dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n", ctype_string, max_id - 1); return -EINVAL; } switch (ctype) { case NIX_AQ_CTYPE_CQ: rvu->rvu_dbg.nix_cq_ctx.lf = nixlf; rvu->rvu_dbg.nix_cq_ctx.id = id; rvu->rvu_dbg.nix_cq_ctx.all = all; break; case NIX_AQ_CTYPE_SQ: rvu->rvu_dbg.nix_sq_ctx.lf = nixlf; rvu->rvu_dbg.nix_sq_ctx.id = id; rvu->rvu_dbg.nix_sq_ctx.all = all; break; case NIX_AQ_CTYPE_RQ: rvu->rvu_dbg.nix_rq_ctx.lf = nixlf; rvu->rvu_dbg.nix_rq_ctx.id = id; rvu->rvu_dbg.nix_rq_ctx.all = all; break; default: return -EINVAL; } return 0; } static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos, int ctype) { struct seq_file *m = filp->private_data; struct nix_hw *nix_hw = m->private; struct rvu *rvu = nix_hw->rvu; char *cmd_buf, *ctype_string; int nixlf, id = 0, ret; bool all = false; if ((*ppos != 0) || !count) return -EINVAL; switch (ctype) { case NIX_AQ_CTYPE_SQ: ctype_string = "sq"; break; case NIX_AQ_CTYPE_RQ: ctype_string = "rq"; break; case NIX_AQ_CTYPE_CQ: ctype_string = "cq"; break; default: return -EINVAL; } cmd_buf = kzalloc(count + 1, GFP_KERNEL); if (!cmd_buf) return count; ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer, &nixlf, &id, &all); if (ret < 0) { dev_info(rvu->dev, "Usage: echo <nixlf> [%s number/all] > %s_ctx\n", ctype_string, ctype_string); goto done; } else { ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype, ctype_string, m); } done: kfree(cmd_buf); return ret ? 
ret : count; } static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos) { return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos, NIX_AQ_CTYPE_SQ); } static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused) { return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ); } RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write); static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos) { return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos, NIX_AQ_CTYPE_RQ); } static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused) { return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ); } RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write); static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos) { return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos, NIX_AQ_CTYPE_CQ); } static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused) { return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ); } RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write); static void print_nix_qctx_qsize(struct seq_file *filp, int qsize, unsigned long *bmap, char *qtype) { char *buf; buf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buf) return; bitmap_print_to_pagebuf(false, buf, bmap, qsize); seq_printf(filp, "%s context count : %d\n", qtype, qsize); seq_printf(filp, "%s context ena/dis bitmap : %s\n", qtype, buf); kfree(buf); } static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf) { if (!pfvf->cq_ctx) seq_puts(filp, "cq context is not initialized\n"); else print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap, "cq"); if (!pfvf->rq_ctx) seq_puts(filp, "rq context is not initialized\n"); else print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap, "rq"); if (!pfvf->sq_ctx) seq_puts(filp, "sq context is not initialized\n"); else print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap, "sq"); } static ssize_t rvu_dbg_nix_qsize_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos) { return rvu_dbg_qsize_write(filp, buffer, count, ppos, BLKTYPE_NIX); } static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused) { return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX); } RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write); static void print_band_prof_ctx(struct seq_file *m, struct nix_bandprof_s *prof) { char *str; switch (prof->pc_mode) { case NIX_RX_PC_MODE_VLAN: str = "VLAN"; break; case NIX_RX_PC_MODE_DSCP: str = "DSCP"; break; case NIX_RX_PC_MODE_GEN: str = "Generic"; break; case NIX_RX_PC_MODE_RSVD: str = "Reserved"; break; } seq_printf(m, "W0: pc_mode\t\t%s\n", str); str = (prof->icolor == 3) ? "Color blind" : (prof->icolor == 0) ? "Green" : (prof->icolor == 1) ? 
"Yellow" : "Red"; seq_printf(m, "W0: icolor\t\t%s\n", str); seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena); seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent); seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent); seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent); seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent); seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa); seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa); seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa); seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa); str = (prof->lmode == 0) ? "byte" : "packet"; seq_printf(m, "W1: lmode\t\t%s\n", str); seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect); seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv); seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent); seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa); str = (prof->gc_action == 0) ? "PASS" : (prof->gc_action == 1) ? "DROP" : "RED"; seq_printf(m, "W1: gc_action\t\t%s\n", str); str = (prof->yc_action == 0) ? "PASS" : (prof->yc_action == 1) ? "DROP" : "RED"; seq_printf(m, "W1: yc_action\t\t%s\n", str); str = (prof->rc_action == 0) ? "PASS" : (prof->rc_action == 1) ? "DROP" : "RED"; seq_printf(m, "W1: rc_action\t\t%s\n", str); seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo); seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id); seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en); seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts); seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum); seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum); seq_printf(m, "W4: green_pkt_pass\t%lld\n", (u64)prof->green_pkt_pass); seq_printf(m, "W5: yellow_pkt_pass\t%lld\n", (u64)prof->yellow_pkt_pass); seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass); seq_printf(m, "W7: green_octs_pass\t%lld\n", (u64)prof->green_octs_pass); seq_printf(m, "W8: yellow_octs_pass\t%lld\n", (u64)prof->yellow_octs_pass); seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass); seq_printf(m, "W10: green_pkt_drop\t%lld\n", (u64)prof->green_pkt_drop); seq_printf(m, "W11: yellow_pkt_drop\t%lld\n", (u64)prof->yellow_pkt_drop); seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop); seq_printf(m, "W13: green_octs_drop\t%lld\n", (u64)prof->green_octs_drop); seq_printf(m, "W14: yellow_octs_drop\t%lld\n", (u64)prof->yellow_octs_drop); seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop); seq_puts(m, "==============================\n"); } static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused) { struct nix_hw *nix_hw = m->private; struct nix_cn10k_aq_enq_req aq_req; struct nix_cn10k_aq_enq_rsp aq_rsp; struct rvu *rvu = nix_hw->rvu; struct nix_ipolicer *ipolicer; int layer, prof_idx, idx, rc; u16 pcifunc; char *str; /* Ingress policers do not exist on all platforms */ if (!nix_hw->ipolicer) return 0; for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { if (layer == BAND_PROF_INVAL_LAYER) continue; str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" : (layer == BAND_PROF_MID_LAYER) ? 
"Mid" : "Top"; seq_printf(m, "\n%s bandwidth profiles\n", str); seq_puts(m, "=======================\n"); ipolicer = &nix_hw->ipolicer[layer]; for (idx = 0; idx < ipolicer->band_prof.max; idx++) { if (is_rsrc_free(&ipolicer->band_prof, idx)) continue; prof_idx = (idx & 0x3FFF) | (layer << 14); rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, NIX_AQ_CTYPE_BANDPROF, prof_idx); if (rc) { dev_err(rvu->dev, "%s: Failed to fetch context of %s profile %d, err %d\n", __func__, str, idx, rc); return 0; } seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx); pcifunc = ipolicer->pfvf_map[idx]; if (!(pcifunc & RVU_PFVF_FUNC_MASK)) seq_printf(m, "Allocated to :: PF %d\n", rvu_get_pf(pcifunc)); else seq_printf(m, "Allocated to :: PF %d VF %d\n", rvu_get_pf(pcifunc), (pcifunc & RVU_PFVF_FUNC_MASK) - 1); print_band_prof_ctx(m, &aq_rsp.prof); } } return 0; } RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL); static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused) { struct nix_hw *nix_hw = m->private; struct nix_ipolicer *ipolicer; int layer; char *str; /* Ingress policers do not exist on all platforms */ if (!nix_hw->ipolicer) return 0; seq_puts(m, "\nBandwidth profile resource free count\n"); seq_puts(m, "=====================================\n"); for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { if (layer == BAND_PROF_INVAL_LAYER) continue; str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" : (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top "; ipolicer = &nix_hw->ipolicer[layer]; seq_printf(m, "%s :: Max: %4d Free: %4d\n", str, ipolicer->band_prof.max, rvu_rsrc_free_count(&ipolicer->band_prof)); } seq_puts(m, "=====================================\n"); return 0; } RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL); static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr) { struct nix_hw *nix_hw; if (!is_block_implemented(rvu->hw, blkaddr)) return; if (blkaddr == BLKADDR_NIX0) { rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root); nix_hw = &rvu->hw->nix[0]; } else { rvu->rvu_dbg.nix = debugfs_create_dir("nix1", rvu->rvu_dbg.root); nix_hw = &rvu->hw->nix[1]; } debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, &rvu_dbg_nix_sq_ctx_fops); debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, &rvu_dbg_nix_rq_ctx_fops); debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, &rvu_dbg_nix_cq_ctx_fops); debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw, &rvu_dbg_nix_ndc_tx_cache_fops); debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw, &rvu_dbg_nix_ndc_rx_cache_fops); debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw, &rvu_dbg_nix_ndc_tx_hits_miss_fops); debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw, &rvu_dbg_nix_ndc_rx_hits_miss_fops); debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu, &rvu_dbg_nix_qsize_fops); debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, &rvu_dbg_nix_band_prof_ctx_fops); debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw, &rvu_dbg_nix_band_prof_rsrc_fops); } static void rvu_dbg_npa_init(struct rvu *rvu) { rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root); debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu, &rvu_dbg_npa_qsize_fops); debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu, &rvu_dbg_npa_aura_ctx_fops); debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu, &rvu_dbg_npa_pool_ctx_fops); 
debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu, &rvu_dbg_npa_ndc_cache_fops); debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu, &rvu_dbg_npa_ndc_hits_miss_fops); } #define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \ ({ \ u64 cnt; \ err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \ NIX_STATS_RX, &(cnt)); \ if (!err) \ seq_printf(s, "%s: %llu\n", name, cnt); \ cnt; \ }) #define PRINT_CGX_CUML_NIXTX_STATUS(idx, name) \ ({ \ u64 cnt; \ err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \ NIX_STATS_TX, &(cnt)); \ if (!err) \ seq_printf(s, "%s: %llu\n", name, cnt); \ cnt; \ }) static int cgx_print_stats(struct seq_file *s, int lmac_id) { struct cgx_link_user_info linfo; struct mac_ops *mac_ops; void *cgxd = s->private; u64 ucast, mcast, bcast; int stat = 0, err = 0; u64 tx_stat, rx_stat; struct rvu *rvu; rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF, NULL)); if (!rvu) return -ENODEV; mac_ops = get_mac_ops(cgxd); /* There can be no CGX devices at all */ if (!mac_ops) return 0; /* Link status */ seq_puts(s, "\n=======Link Status======\n\n"); err = cgx_get_link_info(cgxd, lmac_id, &linfo); if (err) seq_puts(s, "Failed to read link status\n"); seq_printf(s, "\nLink is %s %d Mbps\n\n", linfo.link_up ? "UP" : "DOWN", linfo.speed); /* Rx stats */ seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n", mac_ops->name); ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames"); if (err) return err; mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames"); if (err) return err; bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames"); if (err) return err; seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast); PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes"); if (err) return err; PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops"); if (err) return err; PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors"); if (err) return err; /* Tx stats */ seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n", mac_ops->name); ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames"); if (err) return err; mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames"); if (err) return err; bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames"); if (err) return err; seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast); PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes"); if (err) return err; PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops"); if (err) return err; /* Rx stats */ seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name); while (stat < mac_ops->rx_stats_cnt) { err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat); if (err) return err; if (is_rvu_otx2(rvu)) seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat], rx_stat); else seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat], rx_stat); stat++; } /* Tx stats */ stat = 0; seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name); while (stat < mac_ops->tx_stats_cnt) { err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat); if (err) return err; if (is_rvu_otx2(rvu)) seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], tx_stat); else seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat], tx_stat); stat++; } return err; } static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id) { struct dentry *current_dir; char *buf; current_dir = filp->file->f_path.dentry->d_parent; buf = strrchr(current_dir->d_name.name, 'c'); if (!buf) return -EINVAL; return kstrtoint(buf + 1, 10, lmac_id); } static int 
rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) { int lmac_id, err; err = rvu_dbg_derive_lmacid(filp, &lmac_id); if (!err) return cgx_print_stats(filp, lmac_id); return err; } RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL); static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id) { struct pci_dev *pdev = NULL; void *cgxd = s->private; char *bcast, *mcast; u16 index, domain; u8 dmac[ETH_ALEN]; struct rvu *rvu; u64 cfg, mac; int pf; rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF, NULL)); if (!rvu) return -ENODEV; pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id); domain = 2; pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0); if (!pdev) return 0; cfg = cgx_read_dmac_ctrl(cgxd, lmac_id); bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT"; mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT"; seq_puts(s, "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n"); seq_printf(s, "%s PF%d %9s %9s", dev_name(&pdev->dev), pf, bcast, mcast); if (cfg & CGX_DMAC_CAM_ACCEPT) seq_printf(s, "%12s\n\n", "UNICAST"); else seq_printf(s, "%16s\n\n", "PROMISCUOUS"); seq_puts(s, "\nDMAC-INDEX ADDRESS\n"); for (index = 0 ; index < 32 ; index++) { cfg = cgx_read_dmac_entry(cgxd, index); /* Display enabled dmac entries associated with current lmac */ if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) && FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) { mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg); u64_to_ether_addr(mac, dmac); seq_printf(s, "%7d %pM\n", index, dmac); } } pci_dev_put(pdev); return 0; } static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused) { int err, lmac_id; err = rvu_dbg_derive_lmacid(filp, &lmac_id); if (!err) return cgx_print_dmac_flt(filp, lmac_id); return err; } RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL); static void rvu_dbg_cgx_init(struct rvu *rvu) { struct mac_ops *mac_ops; unsigned long lmac_bmap; int i, lmac_id; char dname[20]; void *cgx; if (!cgx_get_cgxcnt_max()) return; mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); if (!mac_ops) return; rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name, rvu->rvu_dbg.root); for (i = 0; i < cgx_get_cgxcnt_max(); i++) { cgx = rvu_cgx_pdata(i, rvu); if (!cgx) continue; lmac_bmap = cgx_get_lmac_bmap(cgx); /* cgx debugfs dir */ sprintf(dname, "%s%d", mac_ops->name, i); rvu->rvu_dbg.cgx = debugfs_create_dir(dname, rvu->rvu_dbg.cgx_root); for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) { /* lmac debugfs dir */ sprintf(dname, "lmac%d", lmac_id); rvu->rvu_dbg.lmac = debugfs_create_dir(dname, rvu->rvu_dbg.cgx); debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac, cgx, &rvu_dbg_cgx_stat_fops); debugfs_create_file("mac_filter", 0600, rvu->rvu_dbg.lmac, cgx, &rvu_dbg_cgx_dmac_flt_fops); } } } /* NPC debugfs APIs */ static void rvu_print_npc_mcam_info(struct seq_file *s, u16 pcifunc, int blkaddr) { struct rvu *rvu = s->private; int entry_acnt, entry_ecnt; int cntr_acnt, cntr_ecnt; rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr, &entry_acnt, &entry_ecnt); rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr, &cntr_acnt, &cntr_ecnt); if (!entry_acnt && !cntr_acnt) return; if (!(pcifunc & RVU_PFVF_FUNC_MASK)) seq_printf(s, "\n\t\t Device \t\t: PF%d\n", rvu_get_pf(pcifunc)); else seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n", rvu_get_pf(pcifunc), (pcifunc & RVU_PFVF_FUNC_MASK) - 1); if (entry_acnt) { seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt); seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt); } 
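/* MCAM counters allocated to this PF/VF, if any */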
if (cntr_acnt) { seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt); seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt); } } static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued) { struct rvu *rvu = filp->private; int pf, vf, numvfs, blkaddr; struct npc_mcam *mcam; u16 pcifunc, counters; u64 cfg; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return -ENODEV; mcam = &rvu->hw->mcam; counters = rvu->hw->npc_counters; seq_puts(filp, "\nNPC MCAM info:\n"); /* MCAM keywidth on receive and transmit sides */ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX)); cfg = (cfg >> 32) & 0x07; seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ? "112bits" : ((cfg == NPC_MCAM_KEY_X2) ? "224bits" : "448bits")); cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX)); cfg = (cfg >> 32) & 0x07; seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ? "112bits" : ((cfg == NPC_MCAM_KEY_X2) ? "224bits" : "448bits")); mutex_lock(&mcam->lock); /* MCAM entries */ seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries); seq_printf(filp, "\t\t Reserved \t: %d\n", mcam->total_entries - mcam->bmap_entries); seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt); /* MCAM counters */ seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters); seq_printf(filp, "\t\t Reserved \t: %d\n", counters - mcam->counters.max); seq_printf(filp, "\t\t Available \t: %d\n", rvu_rsrc_free_count(&mcam->counters)); if (mcam->bmap_entries == mcam->bmap_fcnt) { mutex_unlock(&mcam->lock); return 0; } seq_puts(filp, "\n\t\t Current allocation\n"); seq_puts(filp, "\t\t====================\n"); for (pf = 0; pf < rvu->hw->total_pfs; pf++) { pcifunc = (pf << RVU_PFVF_PF_SHIFT); rvu_print_npc_mcam_info(filp, pcifunc, blkaddr); cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); numvfs = (cfg >> 12) & 0xFF; for (vf = 0; vf < numvfs; vf++) { pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1); rvu_print_npc_mcam_info(filp, pcifunc, blkaddr); } } mutex_unlock(&mcam->lock); return 0; } RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL); static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp, void *unused) { struct rvu *rvu = filp->private; struct npc_mcam *mcam; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return -ENODEV; mcam = &rvu->hw->mcam; seq_puts(filp, "\nNPC MCAM RX miss action stats\n"); seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr, rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr))); return 0; } RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL); static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s, struct rvu_npc_mcam_rule *rule) { u8 bit; for_each_set_bit(bit, (unsigned long *)&rule->features, 64) { seq_printf(s, "\t%s ", npc_get_field_name(bit)); switch (bit) { case NPC_LXMB: if (rule->lxmb == 1) seq_puts(s, "\tL2M nibble is set\n"); else seq_puts(s, "\tL2B nibble is set\n"); break; case NPC_DMAC: seq_printf(s, "%pM ", rule->packet.dmac); seq_printf(s, "mask %pM\n", rule->mask.dmac); break; case NPC_SMAC: seq_printf(s, "%pM ", rule->packet.smac); seq_printf(s, "mask %pM\n", rule->mask.smac); break; case NPC_ETYPE: seq_printf(s, "0x%x ", ntohs(rule->packet.etype)); seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype)); break; case NPC_OUTER_VID: seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci)); seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.vlan_tci)); break; case NPC_INNER_VID: 
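/* Inner VLAN TCI of a double-tagged packet */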
seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_itci)); seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.vlan_itci)); break; case NPC_TOS: seq_printf(s, "%d ", rule->packet.tos); seq_printf(s, "mask 0x%x\n", rule->mask.tos); break; case NPC_SIP_IPV4: seq_printf(s, "%pI4 ", &rule->packet.ip4src); seq_printf(s, "mask %pI4\n", &rule->mask.ip4src); break; case NPC_DIP_IPV4: seq_printf(s, "%pI4 ", &rule->packet.ip4dst); seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst); break; case NPC_SIP_IPV6: seq_printf(s, "%pI6 ", rule->packet.ip6src); seq_printf(s, "mask %pI6\n", rule->mask.ip6src); break; case NPC_DIP_IPV6: seq_printf(s, "%pI6 ", rule->packet.ip6dst); seq_printf(s, "mask %pI6\n", rule->mask.ip6dst); break; case NPC_IPFRAG_IPV6: seq_printf(s, "0x%x ", rule->packet.next_header); seq_printf(s, "mask 0x%x\n", rule->mask.next_header); break; case NPC_IPFRAG_IPV4: seq_printf(s, "0x%x ", rule->packet.ip_flag); seq_printf(s, "mask 0x%x\n", rule->mask.ip_flag); break; case NPC_SPORT_TCP: case NPC_SPORT_UDP: case NPC_SPORT_SCTP: seq_printf(s, "%d ", ntohs(rule->packet.sport)); seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport)); break; case NPC_DPORT_TCP: case NPC_DPORT_UDP: case NPC_DPORT_SCTP: seq_printf(s, "%d ", ntohs(rule->packet.dport)); seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport)); break; case NPC_IPSEC_SPI: seq_printf(s, "0x%x ", ntohl(rule->packet.spi)); seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi)); break; default: seq_puts(s, "\n"); break; } } } static void rvu_dbg_npc_mcam_show_action(struct seq_file *s, struct rvu_npc_mcam_rule *rule) { if (is_npc_intf_tx(rule->intf)) { switch (rule->tx_action.op) { case NIX_TX_ACTIONOP_DROP: seq_puts(s, "\taction: Drop\n"); break; case NIX_TX_ACTIONOP_UCAST_DEFAULT: seq_puts(s, "\taction: Unicast to default channel\n"); break; case NIX_TX_ACTIONOP_UCAST_CHAN: seq_printf(s, "\taction: Unicast to channel %d\n", rule->tx_action.index); break; case NIX_TX_ACTIONOP_MCAST: seq_puts(s, "\taction: Multicast\n"); break; case NIX_TX_ACTIONOP_DROP_VIOL: seq_puts(s, "\taction: Lockdown Violation Drop\n"); break; default: break; } } else { switch (rule->rx_action.op) { case NIX_RX_ACTIONOP_DROP: seq_puts(s, "\taction: Drop\n"); break; case NIX_RX_ACTIONOP_UCAST: seq_printf(s, "\taction: Direct to queue %d\n", rule->rx_action.index); break; case NIX_RX_ACTIONOP_RSS: seq_puts(s, "\taction: RSS\n"); break; case NIX_RX_ACTIONOP_UCAST_IPSEC: seq_puts(s, "\taction: Unicast ipsec\n"); break; case NIX_RX_ACTIONOP_MCAST: seq_puts(s, "\taction: Multicast\n"); break; default: break; } } } static const char *rvu_dbg_get_intf_name(int intf) { switch (intf) { case NIX_INTFX_RX(0): return "NIX0_RX"; case NIX_INTFX_RX(1): return "NIX1_RX"; case NIX_INTFX_TX(0): return "NIX0_TX"; case NIX_INTFX_TX(1): return "NIX1_TX"; default: break; } return "unknown"; } static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused) { struct rvu_npc_mcam_rule *iter; struct rvu *rvu = s->private; struct npc_mcam *mcam; int pf, vf = -1; bool enabled; int blkaddr; u16 target; u64 hits; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return 0; mcam = &rvu->hw->mcam; mutex_lock(&mcam->lock); list_for_each_entry(iter, &mcam->mcam_rules, list) { pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; seq_printf(s, "\n\tInstalled by: PF%d ", pf); if (iter->owner & RVU_PFVF_FUNC_MASK) { vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1; seq_printf(s, "VF%d", vf); } seq_puts(s, "\n"); seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ? 
"RX" : "TX"); seq_printf(s, "\tinterface: %s\n", rvu_dbg_get_intf_name(iter->intf)); seq_printf(s, "\tmcam entry: %d\n", iter->entry); rvu_dbg_npc_mcam_show_flows(s, iter); if (is_npc_intf_rx(iter->intf)) { target = iter->rx_action.pf_func; pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; seq_printf(s, "\tForward to: PF%d ", pf); if (target & RVU_PFVF_FUNC_MASK) { vf = (target & RVU_PFVF_FUNC_MASK) - 1; seq_printf(s, "VF%d", vf); } seq_puts(s, "\n"); seq_printf(s, "\tchannel: 0x%x\n", iter->chan); seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask); } rvu_dbg_npc_mcam_show_action(s, iter); enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry); seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no"); if (!iter->has_cntr) continue; seq_printf(s, "\tcounter: %d\n", iter->cntr); hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr)); seq_printf(s, "\thits: %lld\n", hits); } mutex_unlock(&mcam->lock); return 0; } RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL); static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused) { struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 }; struct npc_exact_table_entry *cam_entry; struct npc_exact_table *table; struct rvu *rvu = s->private; int i, j; u8 bitmap = 0; table = rvu->hw->table; mutex_lock(&table->lock); /* Check if there is at least one entry in mem table */ if (!table->mem_tbl_entry_cnt) goto dump_cam_table; /* Print table headers */ seq_puts(s, "\n\tExact Match MEM Table\n"); seq_puts(s, "Index\t"); for (i = 0; i < table->mem_table.ways; i++) { mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i], struct npc_exact_table_entry, list); seq_printf(s, "Way-%d\t\t\t\t\t", i); } seq_puts(s, "\n"); for (i = 0; i < table->mem_table.ways; i++) seq_puts(s, "\tChan MAC \t"); seq_puts(s, "\n\n"); /* Print mem table entries */ for (i = 0; i < table->mem_table.depth; i++) { bitmap = 0; for (j = 0; j < table->mem_table.ways; j++) { if (!mem_entry[j]) continue; if (mem_entry[j]->index != i) continue; bitmap |= BIT(j); } /* No valid entries */ if (!bitmap) continue; seq_printf(s, "%d\t", i); for (j = 0; j < table->mem_table.ways; j++) { if (!(bitmap & BIT(j))) { seq_puts(s, "nil\t\t\t\t\t"); continue; } seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan, mem_entry[j]->mac); mem_entry[j] = list_next_entry(mem_entry[j], list); } seq_puts(s, "\n"); } dump_cam_table: if (!table->cam_tbl_entry_cnt) goto done; seq_puts(s, "\n\tExact Match CAM Table\n"); seq_puts(s, "index\tchan\tMAC\n"); /* Traverse cam table entries */ list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) { seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan, cam_entry->mac); } done: mutex_unlock(&table->lock); return 0; } RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL); static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused) { struct npc_exact_table *table; struct rvu *rvu = s->private; int i; table = rvu->hw->table; seq_puts(s, "\n\tExact Table Info\n"); seq_printf(s, "Exact Match Feature : %s\n", rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disable"); if (!rvu->hw->cap.npc_exact_match_enabled) return 0; seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n"); for (i = 0; i < table->num_drop_rules; i++) seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]); seq_puts(s, "\nMcam Index\tPromisc Mode Status\n"); for (i = 0; i < table->num_drop_rules; i++) seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? 
"on" : "off"); seq_puts(s, "\n\tMEM Table Info\n"); seq_printf(s, "Ways : %d\n", table->mem_table.ways); seq_printf(s, "Depth : %d\n", table->mem_table.depth); seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask); seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask); seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset); seq_puts(s, "\n\tCAM Table Info\n"); seq_printf(s, "Depth : %d\n", table->cam_table.depth); return 0; } RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL); static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused) { struct npc_exact_table *table; struct rvu *rvu = s->private; struct npc_key_field *field; u16 chan, pcifunc; int blkaddr, i; u64 cfg, cam1; char *str; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); table = rvu->hw->table; field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN]; seq_puts(s, "\n\t Exact Hit on drop status\n"); seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n"); for (i = 0; i < table->num_drop_rules; i++) { pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i); cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0)); /* channel will be always in keyword 0 */ cam1 = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1)); chan = field->kw_mask[0] & cam1; str = (cfg & 1) ? "enabled" : "disabled"; seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i, rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(table->counter_idx[i])), chan, str); } return 0; } RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL); static void rvu_dbg_npc_init(struct rvu *rvu) { rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root); debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu, &rvu_dbg_npc_mcam_info_fops); debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu, &rvu_dbg_npc_mcam_rules_fops); debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu, &rvu_dbg_npc_rx_miss_act_fops); if (!rvu->hw->cap.npc_exact_match_enabled) return; debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu, &rvu_dbg_npc_exact_entries_fops); debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu, &rvu_dbg_npc_exact_info_fops); debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu, &rvu_dbg_npc_exact_drop_cnt_fops); } static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type) { struct cpt_ctx *ctx = filp->private; u64 busy_sts = 0, free_sts = 0; u32 e_min = 0, e_max = 0, e, i; u16 max_ses, max_ies, max_aes; struct rvu *rvu = ctx->rvu; int blkaddr = ctx->blkaddr; u64 reg; reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1); max_ses = reg & 0xffff; max_ies = (reg >> 16) & 0xffff; max_aes = (reg >> 32) & 0xffff; switch (eng_type) { case CPT_AE_TYPE: e_min = max_ses + max_ies; e_max = max_ses + max_ies + max_aes; break; case CPT_SE_TYPE: e_min = 0; e_max = max_ses; break; case CPT_IE_TYPE: e_min = max_ses; e_max = max_ses + max_ies; break; default: return -EINVAL; } for (e = e_min, i = 0; e < e_max; e++, i++) { reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e)); if (reg & 0x1) busy_sts |= 1ULL << i; if (reg & 0x2) free_sts |= 1ULL << i; } seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts); seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts); return 0; } static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused) { return cpt_eng_sts_display(filp, CPT_AE_TYPE); } RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL); static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused) { return cpt_eng_sts_display(filp, 
CPT_SE_TYPE); } RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL); static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused) { return cpt_eng_sts_display(filp, CPT_IE_TYPE); } RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL); static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused) { struct cpt_ctx *ctx = filp->private; u16 max_ses, max_ies, max_aes; struct rvu *rvu = ctx->rvu; int blkaddr = ctx->blkaddr; u32 e_max, e; u64 reg; reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1); max_ses = reg & 0xffff; max_ies = (reg >> 16) & 0xffff; max_aes = (reg >> 32) & 0xffff; e_max = max_ses + max_ies + max_aes; seq_puts(filp, "===========================================\n"); for (e = 0; e < e_max; e++) { reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e)); seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e, reg & 0xff); reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e)); seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e, reg); reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e)); seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e, reg); seq_puts(filp, "===========================================\n"); } return 0; } RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL); static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused) { struct cpt_ctx *ctx = filp->private; int blkaddr = ctx->blkaddr; struct rvu *rvu = ctx->rvu; struct rvu_block *block; struct rvu_hwinfo *hw; u64 reg; u32 lf; hw = rvu->hw; block = &hw->block[blkaddr]; if (!block->lf.bmap) return -ENODEV; seq_puts(filp, "===========================================\n"); for (lf = 0; lf < block->lf.max; lf++) { reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf)); seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg); reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf)); seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg); reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf)); seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg); reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg | (lf << block->lfshift)); seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg); seq_puts(filp, "===========================================\n"); } return 0; } RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL); static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused) { struct cpt_ctx *ctx = filp->private; struct rvu *rvu = ctx->rvu; int blkaddr = ctx->blkaddr; u64 reg0, reg1; reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0)); reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1)); seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1); reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0)); reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1)); seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1); reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0)); seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0); reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT); seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0); reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT); seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0); reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO); seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0); return 0; } RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL); static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused) { struct cpt_ctx *ctx = filp->private; struct rvu *rvu = ctx->rvu; int blkaddr = ctx->blkaddr; u64 reg; reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC); 
seq_printf(filp, "CPT instruction requests %llu\n", reg); reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC); seq_printf(filp, "CPT instruction latency %llu\n", reg); reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC); seq_printf(filp, "CPT NCB read requests %llu\n", reg); reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC); seq_printf(filp, "CPT NCB read latency %llu\n", reg); reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC); seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg); reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC); seq_printf(filp, "CPT active cycles pc %llu\n", reg); reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT); seq_printf(filp, "CPT clock count pc %llu\n", reg); return 0; } RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL); static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr) { struct cpt_ctx *ctx; if (!is_block_implemented(rvu->hw, blkaddr)) return; if (blkaddr == BLKADDR_CPT0) { rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root); ctx = &rvu->rvu_dbg.cpt_ctx[0]; ctx->blkaddr = BLKADDR_CPT0; ctx->rvu = rvu; } else { rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1", rvu->rvu_dbg.root); ctx = &rvu->rvu_dbg.cpt_ctx[1]; ctx->blkaddr = BLKADDR_CPT1; ctx->rvu = rvu; } debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx, &rvu_dbg_cpt_pc_fops); debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx, &rvu_dbg_cpt_ae_sts_fops); debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx, &rvu_dbg_cpt_se_sts_fops); debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx, &rvu_dbg_cpt_ie_sts_fops); debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx, &rvu_dbg_cpt_engines_info_fops); debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx, &rvu_dbg_cpt_lfs_info_fops); debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx, &rvu_dbg_cpt_err_info_fops); } static const char *rvu_get_dbg_dir_name(struct rvu *rvu) { if (!is_rvu_otx2(rvu)) return "cn10k"; else return "octeontx2"; } void rvu_dbg_init(struct rvu *rvu) { rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL); debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu, &rvu_dbg_rsrc_status_fops); if (!is_rvu_otx2(rvu)) debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root, rvu, &rvu_dbg_lmtst_map_table_fops); if (!cgx_get_cgxcnt_max()) goto create; if (is_rvu_otx2(rvu)) debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root, rvu, &rvu_dbg_rvu_pf_cgx_map_fops); else debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root, rvu, &rvu_dbg_rvu_pf_cgx_map_fops); create: rvu_dbg_npa_init(rvu); rvu_dbg_nix_init(rvu, BLKADDR_NIX0); rvu_dbg_nix_init(rvu, BLKADDR_NIX1); rvu_dbg_cgx_init(rvu); rvu_dbg_npc_init(rvu); rvu_dbg_cpt_init(rvu, BLKADDR_CPT0); rvu_dbg_cpt_init(rvu, BLKADDR_CPT1); rvu_dbg_mcs_init(rvu); } void rvu_dbg_exit(struct rvu *rvu) { debugfs_remove_recursive(rvu->rvu_dbg.root); } #endif /* CONFIG_DEBUG_FS */
linux-master
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Admin Function driver * * Copyright (C) 2020 Marvell. */ #include <linux/bitfield.h> #include "rvu_struct.h" #include "rvu_reg.h" #include "rvu.h" #include "npc.h" #include "rvu_npc_fs.h" #include "rvu_npc_hash.h" static const char * const npc_flow_names[] = { [NPC_DMAC] = "dmac", [NPC_SMAC] = "smac", [NPC_ETYPE] = "ether type", [NPC_VLAN_ETYPE_CTAG] = "vlan ether type ctag", [NPC_VLAN_ETYPE_STAG] = "vlan ether type stag", [NPC_OUTER_VID] = "outer vlan id", [NPC_INNER_VID] = "inner vlan id", [NPC_TOS] = "tos", [NPC_IPFRAG_IPV4] = "fragmented IPv4 header ", [NPC_SIP_IPV4] = "ipv4 source ip", [NPC_DIP_IPV4] = "ipv4 destination ip", [NPC_IPFRAG_IPV6] = "fragmented IPv6 header ", [NPC_SIP_IPV6] = "ipv6 source ip", [NPC_DIP_IPV6] = "ipv6 destination ip", [NPC_IPPROTO_TCP] = "ip proto tcp", [NPC_IPPROTO_UDP] = "ip proto udp", [NPC_IPPROTO_SCTP] = "ip proto sctp", [NPC_IPPROTO_ICMP] = "ip proto icmp", [NPC_IPPROTO_ICMP6] = "ip proto icmp6", [NPC_IPPROTO_AH] = "ip proto AH", [NPC_IPPROTO_ESP] = "ip proto ESP", [NPC_SPORT_TCP] = "tcp source port", [NPC_DPORT_TCP] = "tcp destination port", [NPC_SPORT_UDP] = "udp source port", [NPC_DPORT_UDP] = "udp destination port", [NPC_SPORT_SCTP] = "sctp source port", [NPC_DPORT_SCTP] = "sctp destination port", [NPC_LXMB] = "Mcast/Bcast header ", [NPC_IPSEC_SPI] = "SPI ", [NPC_UNKNOWN] = "unknown", }; bool npc_is_feature_supported(struct rvu *rvu, u64 features, u8 intf) { struct npc_mcam *mcam = &rvu->hw->mcam; u64 mcam_features; u64 unsupported; mcam_features = is_npc_intf_tx(intf) ? mcam->tx_features : mcam->rx_features; unsupported = (mcam_features ^ features) & ~mcam_features; /* Return false if at least one of the input flows is not extracted */ return !unsupported; } const char *npc_get_field_name(u8 hdr) { if (hdr >= ARRAY_SIZE(npc_flow_names)) return npc_flow_names[NPC_UNKNOWN]; return npc_flow_names[hdr]; } /* Compute keyword masks and figure out the number of keywords a field * spans in the key. */ static void npc_set_kw_masks(struct npc_mcam *mcam, u8 type, u8 nr_bits, int start_kwi, int offset, u8 intf) { struct npc_key_field *field = &mcam->rx_key_fields[type]; u8 bits_in_kw; int max_kwi; if (mcam->banks_per_entry == 1) max_kwi = 1; /* NPC_MCAM_KEY_X1 */ else if (mcam->banks_per_entry == 2) max_kwi = 3; /* NPC_MCAM_KEY_X2 */ else max_kwi = 6; /* NPC_MCAM_KEY_X4 */ if (is_npc_intf_tx(intf)) field = &mcam->tx_key_fields[type]; if (offset + nr_bits <= 64) { /* one KW only */ if (start_kwi > max_kwi) return; field->kw_mask[start_kwi] |= GENMASK_ULL(nr_bits - 1, 0) << offset; field->nr_kws = 1; } else if (offset + nr_bits > 64 && offset + nr_bits <= 128) { /* two KWs */ if (start_kwi + 1 > max_kwi) return; /* first KW mask */ bits_in_kw = 64 - offset; field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0) << offset; /* second KW mask i.e. mask for rest of bits */ bits_in_kw = nr_bits + offset - 64; field->kw_mask[start_kwi + 1] |= GENMASK_ULL(bits_in_kw - 1, 0); field->nr_kws = 2; } else { /* three KWs */ if (start_kwi + 2 > max_kwi) return; /* first KW mask */ bits_in_kw = 64 - offset; field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0) << offset; /* second KW mask */ field->kw_mask[start_kwi + 1] = ~0ULL; /* third KW mask i.e. 
mask for rest of bits */ bits_in_kw = nr_bits + offset - 128; field->kw_mask[start_kwi + 2] |= GENMASK_ULL(bits_in_kw - 1, 0); field->nr_kws = 3; } } /* Helper function to figure out whether field exists in the key */ static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf) { struct npc_mcam *mcam = &rvu->hw->mcam; struct npc_key_field *input; input = &mcam->rx_key_fields[type]; if (is_npc_intf_tx(intf)) input = &mcam->tx_key_fields[type]; return input->nr_kws > 0; } static bool npc_is_same(struct npc_key_field *input, struct npc_key_field *field) { return memcmp(&input->layer_mdata, &field->layer_mdata, sizeof(struct npc_layer_mdata)) == 0; } static void npc_set_layer_mdata(struct npc_mcam *mcam, enum key_fields type, u64 cfg, u8 lid, u8 lt, u8 intf) { struct npc_key_field *input = &mcam->rx_key_fields[type]; if (is_npc_intf_tx(intf)) input = &mcam->tx_key_fields[type]; input->layer_mdata.hdr = FIELD_GET(NPC_HDR_OFFSET, cfg); input->layer_mdata.key = FIELD_GET(NPC_KEY_OFFSET, cfg); input->layer_mdata.len = FIELD_GET(NPC_BYTESM, cfg) + 1; input->layer_mdata.ltype = lt; input->layer_mdata.lid = lid; } static bool npc_check_overlap_fields(struct npc_key_field *input1, struct npc_key_field *input2) { int kwi; /* Fields with same layer id and different ltypes are mutually * exclusive hence they can be overlapped */ if (input1->layer_mdata.lid == input2->layer_mdata.lid && input1->layer_mdata.ltype != input2->layer_mdata.ltype) return false; for (kwi = 0; kwi < NPC_MAX_KWS_IN_KEY; kwi++) { if (input1->kw_mask[kwi] & input2->kw_mask[kwi]) return true; } return false; } /* Helper function to check whether given field overlaps with any other fields * in the key. Due to limitations on key size and the key extraction profile in * use higher layers can overwrite lower layer's header fields. Hence overlap * needs to be checked. */ static bool npc_check_overlap(struct rvu *rvu, int blkaddr, enum key_fields type, u8 start_lid, u8 intf) { struct npc_mcam *mcam = &rvu->hw->mcam; struct npc_key_field *dummy, *input; int start_kwi, offset; u8 nr_bits, lid, lt, ld; u64 cfg; dummy = &mcam->rx_key_fields[NPC_UNKNOWN]; input = &mcam->rx_key_fields[type]; if (is_npc_intf_tx(intf)) { dummy = &mcam->tx_key_fields[NPC_UNKNOWN]; input = &mcam->tx_key_fields[type]; } for (lid = start_lid; lid < NPC_MAX_LID; lid++) { for (lt = 0; lt < NPC_MAX_LT; lt++) { for (ld = 0; ld < NPC_MAX_LD; ld++) { cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_LIDX_LTX_LDX_CFG (intf, lid, lt, ld)); if (!FIELD_GET(NPC_LDATA_EN, cfg)) continue; memset(dummy, 0, sizeof(struct npc_key_field)); npc_set_layer_mdata(mcam, NPC_UNKNOWN, cfg, lid, lt, intf); /* exclude input */ if (npc_is_same(input, dummy)) continue; start_kwi = dummy->layer_mdata.key / 8; offset = (dummy->layer_mdata.key * 8) % 64; nr_bits = dummy->layer_mdata.len * 8; /* form KW masks */ npc_set_kw_masks(mcam, NPC_UNKNOWN, nr_bits, start_kwi, offset, intf); /* check any input field bits falls in any * other field bits. 
*/ if (npc_check_overlap_fields(dummy, input)) return true; } } } return false; } static bool npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type, u8 intf) { if (!npc_is_field_present(rvu, type, intf) || npc_check_overlap(rvu, blkaddr, type, 0, intf)) return false; return true; } static void npc_scan_exact_result(struct npc_mcam *mcam, u8 bit_number, u8 key_nibble, u8 intf) { u8 offset = (key_nibble * 4) % 64; /* offset within key word */ u8 kwi = (key_nibble * 4) / 64; /* which word in key */ u8 nr_bits = 4; /* bits in a nibble */ u8 type; switch (bit_number) { case 40 ... 43: type = NPC_EXACT_RESULT; break; default: return; } npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf); } static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number, u8 key_nibble, u8 intf) { u8 offset = (key_nibble * 4) % 64; /* offset within key word */ u8 kwi = (key_nibble * 4) / 64; /* which word in key */ u8 nr_bits = 4; /* bits in a nibble */ u8 type; switch (bit_number) { case 0 ... 2: type = NPC_CHAN; break; case 3: type = NPC_ERRLEV; break; case 4 ... 5: type = NPC_ERRCODE; break; case 6: type = NPC_LXMB; break; /* check for LTYPE only as of now */ case 9: type = NPC_LA; break; case 12: type = NPC_LB; break; case 15: type = NPC_LC; break; case 18: type = NPC_LD; break; case 21: type = NPC_LE; break; case 24: type = NPC_LF; break; case 27: type = NPC_LG; break; case 30: type = NPC_LH; break; default: return; } npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf); } static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf) { struct npc_mcam *mcam = &rvu->hw->mcam; struct npc_key_field *key_fields; /* Ether type can come from three layers * (ethernet, single tagged, double tagged) */ struct npc_key_field *etype_ether; struct npc_key_field *etype_tag1; struct npc_key_field *etype_tag2; /* Outer VLAN TCI can come from two layers * (single tagged, double tagged) */ struct npc_key_field *vlan_tag1; struct npc_key_field *vlan_tag2; /* Inner VLAN TCI for double tagged frames */ struct npc_key_field *vlan_tag3; u64 *features; u8 start_lid; int i; key_fields = mcam->rx_key_fields; features = &mcam->rx_features; if (is_npc_intf_tx(intf)) { key_fields = mcam->tx_key_fields; features = &mcam->tx_features; } /* Handle header fields which can come from multiple layers like * etype, outer vlan tci. These fields should have same position in * the key otherwise to install a mcam rule more than one entry is * needed which complicates mcam space management. 
*/ etype_ether = &key_fields[NPC_ETYPE_ETHER]; etype_tag1 = &key_fields[NPC_ETYPE_TAG1]; etype_tag2 = &key_fields[NPC_ETYPE_TAG2]; vlan_tag1 = &key_fields[NPC_VLAN_TAG1]; vlan_tag2 = &key_fields[NPC_VLAN_TAG2]; vlan_tag3 = &key_fields[NPC_VLAN_TAG3]; /* if key profile programmed does not extract Ethertype at all */ if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws) { dev_err(rvu->dev, "mkex: Ethertype is not extracted.\n"); goto vlan_tci; } /* if key profile programmed extracts Ethertype from one layer */ if (etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws) key_fields[NPC_ETYPE] = *etype_ether; if (!etype_ether->nr_kws && etype_tag1->nr_kws && !etype_tag2->nr_kws) key_fields[NPC_ETYPE] = *etype_tag1; if (!etype_ether->nr_kws && !etype_tag1->nr_kws && etype_tag2->nr_kws) key_fields[NPC_ETYPE] = *etype_tag2; /* if key profile programmed extracts Ethertype from multiple layers */ if (etype_ether->nr_kws && etype_tag1->nr_kws) { for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i]) { dev_err(rvu->dev, "mkex: Etype pos is different for untagged and tagged pkts.\n"); goto vlan_tci; } } key_fields[NPC_ETYPE] = *etype_tag1; } if (etype_ether->nr_kws && etype_tag2->nr_kws) { for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i]) { dev_err(rvu->dev, "mkex: Etype pos is different for untagged and double tagged pkts.\n"); goto vlan_tci; } } key_fields[NPC_ETYPE] = *etype_tag2; } if (etype_tag1->nr_kws && etype_tag2->nr_kws) { for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i]) { dev_err(rvu->dev, "mkex: Etype pos is different for tagged and double tagged pkts.\n"); goto vlan_tci; } } key_fields[NPC_ETYPE] = *etype_tag2; } /* check none of higher layers overwrite Ethertype */ start_lid = key_fields[NPC_ETYPE].layer_mdata.lid + 1; if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf)) { dev_err(rvu->dev, "mkex: Ethertype is overwritten by higher layers.\n"); goto vlan_tci; } *features |= BIT_ULL(NPC_ETYPE); vlan_tci: /* if key profile does not extract outer vlan tci at all */ if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws) { dev_err(rvu->dev, "mkex: Outer vlan tci is not extracted.\n"); goto done; } /* if key profile extracts outer vlan tci from one layer */ if (vlan_tag1->nr_kws && !vlan_tag2->nr_kws) key_fields[NPC_OUTER_VID] = *vlan_tag1; if (!vlan_tag1->nr_kws && vlan_tag2->nr_kws) key_fields[NPC_OUTER_VID] = *vlan_tag2; /* if key profile extracts outer vlan tci from multiple layers */ if (vlan_tag1->nr_kws && vlan_tag2->nr_kws) { for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i]) { dev_err(rvu->dev, "mkex: Out vlan tci pos is different for tagged and double tagged pkts.\n"); goto done; } } key_fields[NPC_OUTER_VID] = *vlan_tag2; } /* check none of higher layers overwrite outer vlan tci */ start_lid = key_fields[NPC_OUTER_VID].layer_mdata.lid + 1; if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf)) { dev_err(rvu->dev, "mkex: Outer vlan tci is overwritten by higher layers.\n"); goto done; } *features |= BIT_ULL(NPC_OUTER_VID); /* If key profile extracts inner vlan tci */ if (vlan_tag3->nr_kws) { key_fields[NPC_INNER_VID] = *vlan_tag3; *features |= BIT_ULL(NPC_INNER_VID); } done: return; } static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid, u8 lt, u64 cfg, u8 intf) { struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash; struct npc_mcam *mcam = 
&rvu->hw->mcam; u8 hdr, key, nr_bytes, bit_offset; u8 la_ltype, la_start; /* starting KW index and starting bit position */ int start_kwi, offset; nr_bytes = FIELD_GET(NPC_BYTESM, cfg) + 1; hdr = FIELD_GET(NPC_HDR_OFFSET, cfg); key = FIELD_GET(NPC_KEY_OFFSET, cfg); /* For Tx, Layer A has NIX_INST_HDR_S(64 bytes) preceding * ethernet header. */ if (is_npc_intf_tx(intf)) { la_ltype = NPC_LT_LA_IH_NIX_ETHER; la_start = 8; } else { la_ltype = NPC_LT_LA_ETHER; la_start = 0; } #define NPC_SCAN_HDR(name, hlid, hlt, hstart, hlen) \ do { \ start_kwi = key / 8; \ offset = (key * 8) % 64; \ if (lid == (hlid) && lt == (hlt)) { \ if ((hstart) >= hdr && \ ((hstart) + (hlen)) <= (hdr + nr_bytes)) { \ bit_offset = (hdr + nr_bytes - (hstart) - (hlen)) * 8; \ npc_set_layer_mdata(mcam, (name), cfg, lid, lt, intf); \ offset += bit_offset; \ start_kwi += offset / 64; \ offset %= 64; \ npc_set_kw_masks(mcam, (name), (hlen) * 8, \ start_kwi, offset, intf); \ } \ } \ } while (0) /* List LID, LTYPE, start offset from layer and length(in bytes) of * packet header fields below. * Example: Source IP is 4 bytes and starts at 12th byte of IP header */ NPC_SCAN_HDR(NPC_TOS, NPC_LID_LC, NPC_LT_LC_IP, 1, 1); NPC_SCAN_HDR(NPC_IPFRAG_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 6, 1); NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4); NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4); NPC_SCAN_HDR(NPC_IPFRAG_IPV6, NPC_LID_LC, NPC_LT_LC_IP6_EXT, 6, 1); if (rvu->hw->cap.npc_hash_extract) { if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][0]) NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 4); else NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16); if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][1]) NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 4); else NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16); } else { NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16); NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16); } NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2); NPC_SCAN_HDR(NPC_DPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 2, 2); NPC_SCAN_HDR(NPC_SPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 0, 2); NPC_SCAN_HDR(NPC_DPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 2, 2); NPC_SCAN_HDR(NPC_SPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 0, 2); NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2); NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2); NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2); NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2); NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2); NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2); NPC_SCAN_HDR(NPC_VLAN_TAG3, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 6, 2); NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6); NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LD, NPC_LT_LD_AH, 4, 4); NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LE, NPC_LT_LE_ESP, 0, 4); /* SMAC follows the DMAC(which is 6 bytes) */ NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start + 6, 6); /* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */ NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2); } static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf) { struct npc_mcam *mcam = &rvu->hw->mcam; u64 *features = &mcam->rx_features; u64 tcp_udp_sctp; int hdr; if (is_npc_intf_tx(intf)) features = &mcam->tx_features; for (hdr = NPC_DMAC; hdr < NPC_HEADER_FIELDS_MAX; hdr++) { if (npc_check_field(rvu, blkaddr, hdr, intf)) *features |= BIT_ULL(hdr); } 
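/* Group the TCP/UDP/SCTP port features for the layer-type check that follows */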
tcp_udp_sctp = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) | BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) | BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP); /* for tcp/udp/sctp corresponding layer type should be in the key */ if (*features & tcp_udp_sctp) { if (!npc_check_field(rvu, blkaddr, NPC_LD, intf)) *features &= ~tcp_udp_sctp; else *features |= BIT_ULL(NPC_IPPROTO_TCP) | BIT_ULL(NPC_IPPROTO_UDP) | BIT_ULL(NPC_IPPROTO_SCTP); } /* for AH/ICMP/ICMPv6/, check if corresponding layer type is present in the key */ if (npc_check_field(rvu, blkaddr, NPC_LD, intf)) { *features |= BIT_ULL(NPC_IPPROTO_AH); *features |= BIT_ULL(NPC_IPPROTO_ICMP); *features |= BIT_ULL(NPC_IPPROTO_ICMP6); } /* for ESP, check if corresponding layer type is present in the key */ if (npc_check_field(rvu, blkaddr, NPC_LE, intf)) *features |= BIT_ULL(NPC_IPPROTO_ESP); /* for vlan corresponding layer type should be in the key */ if (*features & BIT_ULL(NPC_OUTER_VID)) if (!npc_check_field(rvu, blkaddr, NPC_LB, intf)) *features &= ~BIT_ULL(NPC_OUTER_VID); /* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */ if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) && (*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH)))) *features |= BIT_ULL(NPC_IPSEC_SPI); /* for vlan ethertypes corresponding layer type should be in the key */ if (npc_check_field(rvu, blkaddr, NPC_LB, intf)) *features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG) | BIT_ULL(NPC_VLAN_ETYPE_STAG); /* for L2M/L2B/L3M/L3B, check if the type is present in the key */ if (npc_check_field(rvu, blkaddr, NPC_LXMB, intf)) *features |= BIT_ULL(NPC_LXMB); } /* Scan key extraction profile and record how fields of our interest * fill the key structure. Also verify Channel and DMAC exists in * key and not overwritten by other header fields. */ static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf) { struct npc_mcam *mcam = &rvu->hw->mcam; u8 lid, lt, ld, bitnr; u64 cfg, masked_cfg; u8 key_nibble = 0; /* Scan and note how parse result is going to be in key. * A bit set in PARSE_NIBBLE_ENA corresponds to a nibble from * parse result in the key. The enabled nibbles from parse result * will be concatenated in key. */ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf)); masked_cfg = cfg & NPC_PARSE_NIBBLE; for_each_set_bit(bitnr, (unsigned long *)&masked_cfg, 31) { npc_scan_parse_result(mcam, bitnr, key_nibble, intf); key_nibble++; } /* Ignore exact match bits for mcam entries except the first rule * which is drop on hit. This first rule is configured explitcitly by * exact match code. 
*/ masked_cfg = cfg & NPC_EXACT_NIBBLE; bitnr = NPC_EXACT_NIBBLE_START; for_each_set_bit_from(bitnr, (unsigned long *)&masked_cfg, NPC_EXACT_NIBBLE_END + 1) { npc_scan_exact_result(mcam, bitnr, key_nibble, intf); key_nibble++; } /* Scan and note how layer data is going to be in key */ for (lid = 0; lid < NPC_MAX_LID; lid++) { for (lt = 0; lt < NPC_MAX_LT; lt++) { for (ld = 0; ld < NPC_MAX_LD; ld++) { cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_LIDX_LTX_LDX_CFG (intf, lid, lt, ld)); if (!FIELD_GET(NPC_LDATA_EN, cfg)) continue; npc_scan_ldata(rvu, blkaddr, lid, lt, cfg, intf); } } } return 0; } static int npc_scan_verify_kex(struct rvu *rvu, int blkaddr) { int err; err = npc_scan_kex(rvu, blkaddr, NIX_INTF_RX); if (err) return err; err = npc_scan_kex(rvu, blkaddr, NIX_INTF_TX); if (err) return err; /* Channel is mandatory */ if (!npc_is_field_present(rvu, NPC_CHAN, NIX_INTF_RX)) { dev_err(rvu->dev, "Channel not present in Key\n"); return -EINVAL; } /* check that none of the fields overwrite channel */ if (npc_check_overlap(rvu, blkaddr, NPC_CHAN, 0, NIX_INTF_RX)) { dev_err(rvu->dev, "Channel cannot be overwritten\n"); return -EINVAL; } npc_set_features(rvu, blkaddr, NIX_INTF_TX); npc_set_features(rvu, blkaddr, NIX_INTF_RX); npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_TX); npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_RX); return 0; } int npc_flow_steering_init(struct rvu *rvu, int blkaddr) { struct npc_mcam *mcam = &rvu->hw->mcam; INIT_LIST_HEAD(&mcam->mcam_rules); return npc_scan_verify_kex(rvu, blkaddr); } static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf) { struct npc_mcam *mcam = &rvu->hw->mcam; u64 *mcam_features = &mcam->rx_features; u64 unsupported; u8 bit; if (is_npc_intf_tx(intf)) mcam_features = &mcam->tx_features; unsupported = (*mcam_features ^ features) & ~(*mcam_features); if (unsupported) { dev_warn(rvu->dev, "Unsupported flow(s):\n"); for_each_set_bit(bit, (unsigned long *)&unsupported, 64) dev_warn(rvu->dev, "%s ", npc_get_field_name(bit)); return -EOPNOTSUPP; } return 0; } /* npc_update_entry - Based on the masks generated during * the key scanning, updates the given entry with value and * masks for the field of interest. Maximum 16 bytes of a packet * header can be extracted by HW hence lo and hi are sufficient. * When field bytes are less than or equal to 8 then hi should be * 0 for value and mask. * * If exact match of value is required then mask should be all 1's. * If any bits in mask are 0 then corresponding bits in value are * dont care. */ void npc_update_entry(struct rvu *rvu, enum key_fields type, struct mcam_entry *entry, u64 val_lo, u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf) { struct npc_mcam *mcam = &rvu->hw->mcam; struct mcam_entry dummy = { {0} }; struct npc_key_field *field; u64 kw1, kw2, kw3; u8 shift; int i; field = &mcam->rx_key_fields[type]; if (is_npc_intf_tx(intf)) field = &mcam->tx_key_fields[type]; if (!field->nr_kws) return; for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { if (!field->kw_mask[i]) continue; /* place key value in kw[x] */ shift = __ffs64(field->kw_mask[i]); /* update entry value */ kw1 = (val_lo << shift) & field->kw_mask[i]; dummy.kw[i] = kw1; /* update entry mask */ kw1 = (mask_lo << shift) & field->kw_mask[i]; dummy.kw_mask[i] = kw1; if (field->nr_kws == 1) break; /* place remaining bits of key value in kw[x + 1] */ if (field->nr_kws == 2) { /* update entry value */ kw2 = shift ? 
val_lo >> (64 - shift) : 0; kw2 |= (val_hi << shift); kw2 &= field->kw_mask[i + 1]; dummy.kw[i + 1] = kw2; /* update entry mask */ kw2 = shift ? mask_lo >> (64 - shift) : 0; kw2 |= (mask_hi << shift); kw2 &= field->kw_mask[i + 1]; dummy.kw_mask[i + 1] = kw2; break; } /* place remaining bits of key value in kw[x + 1], kw[x + 2] */ if (field->nr_kws == 3) { /* update entry value */ kw2 = shift ? val_lo >> (64 - shift) : 0; kw2 |= (val_hi << shift); kw2 &= field->kw_mask[i + 1]; kw3 = shift ? val_hi >> (64 - shift) : 0; kw3 &= field->kw_mask[i + 2]; dummy.kw[i + 1] = kw2; dummy.kw[i + 2] = kw3; /* update entry mask */ kw2 = shift ? mask_lo >> (64 - shift) : 0; kw2 |= (mask_hi << shift); kw2 &= field->kw_mask[i + 1]; kw3 = shift ? mask_hi >> (64 - shift) : 0; kw3 &= field->kw_mask[i + 2]; dummy.kw_mask[i + 1] = kw2; dummy.kw_mask[i + 2] = kw3; break; } } /* dummy is ready with values and masks for given key * field now clear and update input entry with those */ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { if (!field->kw_mask[i]) continue; entry->kw[i] &= ~field->kw_mask[i]; entry->kw_mask[i] &= ~field->kw_mask[i]; entry->kw[i] |= dummy.kw[i]; entry->kw_mask[i] |= dummy.kw_mask[i]; } } static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry, u64 features, struct flow_msg *pkt, struct flow_msg *mask, struct rvu_npc_mcam_rule *output, u8 intf) { u32 src_ip[IPV6_WORDS], src_ip_mask[IPV6_WORDS]; u32 dst_ip[IPV6_WORDS], dst_ip_mask[IPV6_WORDS]; struct flow_msg *opkt = &output->packet; struct flow_msg *omask = &output->mask; u64 mask_lo, mask_hi; u64 val_lo, val_hi; /* For an ipv6 address fe80::2c68:63ff:fe5e:2d0a the packet * values to be programmed in MCAM should as below: * val_high: 0xfe80000000000000 * val_low: 0x2c6863fffe5e2d0a */ if (features & BIT_ULL(NPC_SIP_IPV6)) { be32_to_cpu_array(src_ip_mask, mask->ip6src, IPV6_WORDS); be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS); mask_hi = (u64)src_ip_mask[0] << 32 | src_ip_mask[1]; mask_lo = (u64)src_ip_mask[2] << 32 | src_ip_mask[3]; val_hi = (u64)src_ip[0] << 32 | src_ip[1]; val_lo = (u64)src_ip[2] << 32 | src_ip[3]; npc_update_entry(rvu, NPC_SIP_IPV6, entry, val_lo, val_hi, mask_lo, mask_hi, intf); memcpy(opkt->ip6src, pkt->ip6src, sizeof(opkt->ip6src)); memcpy(omask->ip6src, mask->ip6src, sizeof(omask->ip6src)); } if (features & BIT_ULL(NPC_DIP_IPV6)) { be32_to_cpu_array(dst_ip_mask, mask->ip6dst, IPV6_WORDS); be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS); mask_hi = (u64)dst_ip_mask[0] << 32 | dst_ip_mask[1]; mask_lo = (u64)dst_ip_mask[2] << 32 | dst_ip_mask[3]; val_hi = (u64)dst_ip[0] << 32 | dst_ip[1]; val_lo = (u64)dst_ip[2] << 32 | dst_ip[3]; npc_update_entry(rvu, NPC_DIP_IPV6, entry, val_lo, val_hi, mask_lo, mask_hi, intf); memcpy(opkt->ip6dst, pkt->ip6dst, sizeof(opkt->ip6dst)); memcpy(omask->ip6dst, mask->ip6dst, sizeof(omask->ip6dst)); } } static void npc_update_vlan_features(struct rvu *rvu, struct mcam_entry *entry, u64 features, u8 intf) { bool ctag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_CTAG)); bool stag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_STAG)); bool vid = !!(features & BIT_ULL(NPC_OUTER_VID)); /* If only VLAN id is given then always match outer VLAN id */ if (vid && !ctag && !stag) { npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0, NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf); return; } if (ctag) npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_CTAG, 0, ~0ULL, 0, intf); if (stag) npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_STAG_QINQ, 0, ~0ULL, 0, intf); } static void 
npc_update_flow(struct rvu *rvu, struct mcam_entry *entry, u64 features, struct flow_msg *pkt, struct flow_msg *mask, struct rvu_npc_mcam_rule *output, u8 intf, int blkaddr) { u64 dmac_mask = ether_addr_to_u64(mask->dmac); u64 smac_mask = ether_addr_to_u64(mask->smac); u64 dmac_val = ether_addr_to_u64(pkt->dmac); u64 smac_val = ether_addr_to_u64(pkt->smac); struct flow_msg *opkt = &output->packet; struct flow_msg *omask = &output->mask; if (!features) return; /* For tcp/udp/sctp LTYPE should be present in entry */ if (features & BIT_ULL(NPC_IPPROTO_TCP)) npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP, 0, ~0ULL, 0, intf); if (features & BIT_ULL(NPC_IPPROTO_UDP)) npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP, 0, ~0ULL, 0, intf); if (features & BIT_ULL(NPC_IPPROTO_SCTP)) npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP, 0, ~0ULL, 0, intf); if (features & BIT_ULL(NPC_IPPROTO_ICMP)) npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP, 0, ~0ULL, 0, intf); if (features & BIT_ULL(NPC_IPPROTO_ICMP6)) npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP6, 0, ~0ULL, 0, intf); /* For AH, LTYPE should be present in entry */ if (features & BIT_ULL(NPC_IPPROTO_AH)) npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_AH, 0, ~0ULL, 0, intf); /* For ESP, LTYPE should be present in entry */ if (features & BIT_ULL(NPC_IPPROTO_ESP)) npc_update_entry(rvu, NPC_LE, entry, NPC_LT_LE_ESP, 0, ~0ULL, 0, intf); if (features & BIT_ULL(NPC_LXMB)) { output->lxmb = is_broadcast_ether_addr(pkt->dmac) ? 2 : 1; npc_update_entry(rvu, NPC_LXMB, entry, output->lxmb, 0, output->lxmb, 0, intf); } #define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi) \ do { \ if (features & BIT_ULL((field))) { \ npc_update_entry(rvu, (field), entry, (val_lo), (val_hi), \ (mask_lo), (mask_hi), intf); \ memcpy(&opkt->member, &pkt->member, sizeof(pkt->member)); \ memcpy(&omask->member, &mask->member, sizeof(mask->member)); \ } \ } while (0) NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0); NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0); NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0, ntohs(mask->etype), 0); NPC_WRITE_FLOW(NPC_TOS, tos, pkt->tos, 0, mask->tos, 0); NPC_WRITE_FLOW(NPC_IPFRAG_IPV4, ip_flag, pkt->ip_flag, 0, mask->ip_flag, 0); NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0, ntohl(mask->ip4src), 0); NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0, ntohl(mask->ip4dst), 0); NPC_WRITE_FLOW(NPC_SPORT_TCP, sport, ntohs(pkt->sport), 0, ntohs(mask->sport), 0); NPC_WRITE_FLOW(NPC_SPORT_UDP, sport, ntohs(pkt->sport), 0, ntohs(mask->sport), 0); NPC_WRITE_FLOW(NPC_DPORT_TCP, dport, ntohs(pkt->dport), 0, ntohs(mask->dport), 0); NPC_WRITE_FLOW(NPC_DPORT_UDP, dport, ntohs(pkt->dport), 0, ntohs(mask->dport), 0); NPC_WRITE_FLOW(NPC_SPORT_SCTP, sport, ntohs(pkt->sport), 0, ntohs(mask->sport), 0); NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0, ntohs(mask->dport), 0); NPC_WRITE_FLOW(NPC_IPSEC_SPI, spi, ntohl(pkt->spi), 0, ntohl(mask->spi), 0); NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0, ntohs(mask->vlan_tci), 0); NPC_WRITE_FLOW(NPC_INNER_VID, vlan_itci, ntohs(pkt->vlan_itci), 0, ntohs(mask->vlan_itci), 0); NPC_WRITE_FLOW(NPC_IPFRAG_IPV6, next_header, pkt->next_header, 0, mask->next_header, 0); npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf); npc_update_vlan_features(rvu, entry, features, intf); npc_update_field_hash(rvu, intf, entry, blkaddr, features, pkt, mask, opkt, omask); } static struct rvu_npc_mcam_rule 
*rvu_mcam_find_rule(struct npc_mcam *mcam, u16 entry) { struct rvu_npc_mcam_rule *iter; mutex_lock(&mcam->lock); list_for_each_entry(iter, &mcam->mcam_rules, list) { if (iter->entry == entry) { mutex_unlock(&mcam->lock); return iter; } } mutex_unlock(&mcam->lock); return NULL; } static void rvu_mcam_add_rule(struct npc_mcam *mcam, struct rvu_npc_mcam_rule *rule) { struct list_head *head = &mcam->mcam_rules; struct rvu_npc_mcam_rule *iter; mutex_lock(&mcam->lock); list_for_each_entry(iter, &mcam->mcam_rules, list) { if (iter->entry > rule->entry) break; head = &iter->list; } list_add(&rule->list, head); mutex_unlock(&mcam->lock); } static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc, struct rvu_npc_mcam_rule *rule) { struct npc_mcam_oper_counter_req free_req = { 0 }; struct msg_rsp free_rsp; if (!rule->has_cntr) return; free_req.hdr.pcifunc = pcifunc; free_req.cntr = rule->cntr; rvu_mbox_handler_npc_mcam_free_counter(rvu, &free_req, &free_rsp); rule->has_cntr = false; } static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc, struct rvu_npc_mcam_rule *rule, struct npc_install_flow_rsp *rsp) { struct npc_mcam_alloc_counter_req cntr_req = { 0 }; struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 }; int err; cntr_req.hdr.pcifunc = pcifunc; cntr_req.contig = true; cntr_req.count = 1; /* we try to allocate a counter to track the stats of this * rule. If counter could not be allocated then proceed * without counter because counters are limited than entries. */ err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp); if (!err && cntr_rsp.count) { rule->cntr = cntr_rsp.cntr; rule->has_cntr = true; rsp->counter = rule->cntr; } else { rsp->counter = err; } } static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, struct mcam_entry *entry, struct npc_install_flow_req *req, u16 target, bool pf_set_vfs_mac) { struct rvu_switch *rswitch = &rvu->rswitch; struct nix_rx_action action; if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac) req->chan_mask = 0x0; /* Do not care channel */ npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, req->chan_mask, 0, NIX_INTF_RX); *(u64 *)&action = 0x00; action.pf_func = target; action.op = req->op; action.index = req->index; action.match_id = req->match_id; action.flow_key_alg = req->flow_key_alg; if (req->op == NIX_RX_ACTION_DEFAULT) { if (pfvf->def_ucast_rule) { action = pfvf->def_ucast_rule->rx_action; } else { /* For profiles which do not extract DMAC, the default * unicast entry is unused. Hence modify action for the * requests which use same action as default unicast * entry */ *(u64 *)&action = 0; action.pf_func = target; action.op = NIX_RX_ACTIONOP_UCAST; } } entry->action = *(u64 *)&action; /* VTAG0 starts at 0th byte of LID_B. * VTAG1 starts at 4th byte of LID_B. 
*/ entry->vtag_action = FIELD_PREP(RX_VTAG0_VALID_BIT, req->vtag0_valid) | FIELD_PREP(RX_VTAG0_TYPE_MASK, req->vtag0_type) | FIELD_PREP(RX_VTAG0_LID_MASK, NPC_LID_LB) | FIELD_PREP(RX_VTAG0_RELPTR_MASK, 0) | FIELD_PREP(RX_VTAG1_VALID_BIT, req->vtag1_valid) | FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) | FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) | FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4); } static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, struct mcam_entry *entry, struct npc_install_flow_req *req, u16 target) { struct nix_tx_action action; u64 mask = ~0ULL; /* If AF is installing then do not care about * PF_FUNC in Send Descriptor */ if (is_pffunc_af(req->hdr.pcifunc)) mask = 0; npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target), 0, mask, 0, NIX_INTF_TX); *(u64 *)&action = 0x00; action.op = req->op; action.index = req->index; action.match_id = req->match_id; entry->action = *(u64 *)&action; /* VTAG0 starts at 0th byte of LID_B. * VTAG1 starts at 4th byte of LID_B. */ entry->vtag_action = FIELD_PREP(TX_VTAG0_DEF_MASK, req->vtag0_def) | FIELD_PREP(TX_VTAG0_OP_MASK, req->vtag0_op) | FIELD_PREP(TX_VTAG0_LID_MASK, NPC_LID_LA) | FIELD_PREP(TX_VTAG0_RELPTR_MASK, 20) | FIELD_PREP(TX_VTAG1_DEF_MASK, req->vtag1_def) | FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) | FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) | FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24); } static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target, int nixlf, struct rvu_pfvf *pfvf, struct npc_install_flow_req *req, struct npc_install_flow_rsp *rsp, bool enable, bool pf_set_vfs_mac) { struct rvu_npc_mcam_rule *def_ucast_rule = pfvf->def_ucast_rule; u64 features, installed_features, missing_features = 0; struct npc_mcam_write_entry_req write_req = { 0 }; struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_npc_mcam_rule dummy = { 0 }; struct rvu_npc_mcam_rule *rule; u16 owner = req->hdr.pcifunc; struct msg_rsp write_rsp; struct mcam_entry *entry; bool new = false; u16 entry_index; int err; installed_features = req->features; features = req->features; entry = &write_req.entry_data; entry_index = req->entry; npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy, req->intf, blkaddr); if (is_npc_intf_rx(req->intf)) npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac); else npc_update_tx_entry(rvu, pfvf, entry, req, target); /* Default unicast rules do not exist for TX */ if (is_npc_intf_tx(req->intf)) goto find_rule; if (req->default_rule) { entry_index = npc_get_nixlf_mcam_index(mcam, target, nixlf, NIXLF_UCAST_ENTRY); enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, entry_index); } /* update mcam entry with default unicast rule attributes */ if (def_ucast_rule && (req->default_rule && req->append)) { missing_features = (def_ucast_rule->features ^ features) & def_ucast_rule->features; if (missing_features) npc_update_flow(rvu, entry, missing_features, &def_ucast_rule->packet, &def_ucast_rule->mask, &dummy, req->intf, blkaddr); installed_features = req->features | missing_features; } find_rule: rule = rvu_mcam_find_rule(mcam, entry_index); if (!rule) { rule = kzalloc(sizeof(*rule), GFP_KERNEL); if (!rule) return -ENOMEM; new = true; } /* allocate new counter if rule has no counter */ if (!req->default_rule && req->set_cntr && !rule->has_cntr) rvu_mcam_add_counter_to_rule(rvu, owner, rule, rsp); /* if user wants to delete an existing counter for a rule then * free the counter */ if (!req->set_cntr && rule->has_cntr) rvu_mcam_remove_counter_from_rule(rvu, owner, rule); 
write_req.hdr.pcifunc = owner; /* AF owns the default rules so change the owner just to relax * the checks in rvu_mbox_handler_npc_mcam_write_entry */ if (req->default_rule) write_req.hdr.pcifunc = 0; write_req.entry = entry_index; write_req.intf = req->intf; write_req.enable_entry = (u8)enable; /* if counter is available then clear and use it */ if (req->set_cntr && rule->has_cntr) { rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), req->cntr_val); write_req.set_cntr = 1; write_req.cntr = rule->cntr; } /* update rule */ memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet)); memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask)); rule->entry = entry_index; memcpy(&rule->rx_action, &entry->action, sizeof(struct nix_rx_action)); if (is_npc_intf_tx(req->intf)) memcpy(&rule->tx_action, &entry->action, sizeof(struct nix_tx_action)); rule->vtag_action = entry->vtag_action; rule->features = installed_features; rule->default_rule = req->default_rule; rule->owner = owner; rule->enable = enable; rule->chan_mask = write_req.entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK; rule->chan = write_req.entry_data.kw[0] & NPC_KEX_CHAN_MASK; rule->chan &= rule->chan_mask; rule->lxmb = dummy.lxmb; if (is_npc_intf_tx(req->intf)) rule->intf = pfvf->nix_tx_intf; else rule->intf = pfvf->nix_rx_intf; if (new) rvu_mcam_add_rule(mcam, rule); if (req->default_rule) pfvf->def_ucast_rule = rule; /* write to mcam entry registers */ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req, &write_rsp); if (err) { rvu_mcam_remove_counter_from_rule(rvu, owner, rule); if (new) { list_del(&rule->list); kfree(rule); } return err; } /* VF's MAC address is being changed via PF */ if (pf_set_vfs_mac) { ether_addr_copy(pfvf->default_mac, req->packet.dmac); ether_addr_copy(pfvf->mac_addr, req->packet.dmac); set_bit(PF_SET_VF_MAC, &pfvf->flags); } if (test_bit(PF_SET_VF_CFG, &pfvf->flags) && req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7) rule->vfvlan_cfg = true; if (is_npc_intf_rx(req->intf) && req->match_id && (req->op == NIX_RX_ACTIONOP_UCAST || req->op == NIX_RX_ACTIONOP_RSS)) return rvu_nix_setup_ratelimit_aggr(rvu, req->hdr.pcifunc, req->index, req->match_id); return 0; } int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, struct npc_install_flow_req *req, struct npc_install_flow_rsp *rsp) { bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK); struct rvu_switch *rswitch = &rvu->rswitch; int blkaddr, nixlf, err; struct rvu_pfvf *pfvf; bool pf_set_vfs_mac = false; bool enable = true; u16 target; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) { dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); return NPC_MCAM_INVALID_REQ; } if (!is_npc_interface_valid(rvu, req->intf)) return NPC_FLOW_INTF_INVALID; /* If DMAC is not extracted in MKEX, rules installed by AF * can rely on L2MB bit set by hardware protocol checker for * broadcast and multicast addresses. 
*/ if (npc_check_field(rvu, blkaddr, NPC_DMAC, req->intf)) goto process_flow; if (is_pffunc_af(req->hdr.pcifunc) && req->features & BIT_ULL(NPC_DMAC)) { if (is_unicast_ether_addr(req->packet.dmac)) { dev_warn(rvu->dev, "%s: mkex profile does not support ucast flow\n", __func__); return NPC_FLOW_NOT_SUPPORTED; } if (!npc_is_field_present(rvu, NPC_LXMB, req->intf)) { dev_warn(rvu->dev, "%s: mkex profile does not support bcast/mcast flow", __func__); return NPC_FLOW_NOT_SUPPORTED; } /* Modify feature to use LXMB instead of DMAC */ req->features &= ~BIT_ULL(NPC_DMAC); req->features |= BIT_ULL(NPC_LXMB); } process_flow: if (from_vf && req->default_rule) return NPC_FLOW_VF_PERM_DENIED; /* Each PF/VF info is maintained in struct rvu_pfvf. * rvu_pfvf for the target PF/VF needs to be retrieved * hence modify pcifunc accordingly. */ /* AF installing for a PF/VF */ if (!req->hdr.pcifunc) target = req->vf; /* PF installing for its VF */ else if (!from_vf && req->vf) { target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf; pf_set_vfs_mac = req->default_rule && (req->features & BIT_ULL(NPC_DMAC)); } /* msg received from PF/VF */ else target = req->hdr.pcifunc; /* ignore chan_mask in case pf func is not AF, revisit later */ if (!is_pffunc_af(req->hdr.pcifunc)) req->chan_mask = 0xFFF; err = npc_check_unsupported_flows(rvu, req->features, req->intf); if (err) return NPC_FLOW_NOT_SUPPORTED; pfvf = rvu_get_pfvf(rvu, target); /* PF installing for its VF */ if (req->hdr.pcifunc && !from_vf && req->vf) set_bit(PF_SET_VF_CFG, &pfvf->flags); /* update req destination mac addr */ if ((req->features & BIT_ULL(NPC_DMAC)) && is_npc_intf_rx(req->intf) && is_zero_ether_addr(req->packet.dmac)) { ether_addr_copy(req->packet.dmac, pfvf->mac_addr); eth_broadcast_addr((u8 *)&req->mask.dmac); } /* Proceed if NIXLF is attached or not for TX rules */ err = nix_get_nixlf(rvu, target, &nixlf, NULL); if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac) return NPC_FLOW_NO_NIXLF; /* don't enable rule when nixlf not attached or initialized */ if (!(is_nixlf_attached(rvu, target) && test_bit(NIXLF_INITIALIZED, &pfvf->flags))) enable = false; /* Packets reaching NPC in Tx path implies that a * NIXLF is properly setup and transmitting. * Hence rules can be enabled for Tx. 
*/ if (is_npc_intf_tx(req->intf)) enable = true; /* Do not allow requests from uninitialized VFs */ if (from_vf && !enable) return NPC_FLOW_VF_NOT_INIT; /* PF sets VF mac & VF NIXLF is not attached, update the mac addr */ if (pf_set_vfs_mac && !enable) { ether_addr_copy(pfvf->default_mac, req->packet.dmac); ether_addr_copy(pfvf->mac_addr, req->packet.dmac); set_bit(PF_SET_VF_MAC, &pfvf->flags); return 0; } mutex_lock(&rswitch->switch_lock); err = npc_install_flow(rvu, blkaddr, target, nixlf, pfvf, req, rsp, enable, pf_set_vfs_mac); mutex_unlock(&rswitch->switch_lock); return err; } static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule, u16 pcifunc) { struct npc_mcam_ena_dis_entry_req dis_req = { 0 }; struct msg_rsp dis_rsp; if (rule->default_rule) return 0; if (rule->has_cntr) rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule); dis_req.hdr.pcifunc = pcifunc; dis_req.entry = rule->entry; list_del(&rule->list); kfree(rule); return rvu_mbox_handler_npc_mcam_dis_entry(rvu, &dis_req, &dis_rsp); } int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu, struct npc_delete_flow_req *req, struct npc_delete_flow_rsp *rsp) { struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_npc_mcam_rule *iter, *tmp; u16 pcifunc = req->hdr.pcifunc; struct list_head del_list; int blkaddr; INIT_LIST_HEAD(&del_list); mutex_lock(&mcam->lock); list_for_each_entry_safe(iter, tmp, &mcam->mcam_rules, list) { if (iter->owner == pcifunc) { /* All rules */ if (req->all) { list_move_tail(&iter->list, &del_list); /* Range of rules */ } else if (req->end && iter->entry >= req->start && iter->entry <= req->end) { list_move_tail(&iter->list, &del_list); /* single rule */ } else if (req->entry == iter->entry) { blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr) rsp->cntr_val = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr)); list_move_tail(&iter->list, &del_list); break; } } } mutex_unlock(&mcam->lock); list_for_each_entry_safe(iter, tmp, &del_list, list) { u16 entry = iter->entry; /* clear the mcam entry target pcifunc */ mcam->entry2target_pffunc[entry] = 0x0; if (npc_delete_flow(rvu, iter, pcifunc)) dev_err(rvu->dev, "rule deletion failed for entry:%u", entry); } return 0; } static int npc_update_dmac_value(struct rvu *rvu, int npcblkaddr, struct rvu_npc_mcam_rule *rule, struct rvu_pfvf *pfvf) { struct npc_mcam_write_entry_req write_req = { 0 }; struct mcam_entry *entry = &write_req.entry_data; struct npc_mcam *mcam = &rvu->hw->mcam; struct msg_rsp rsp; u8 intf, enable; int err; ether_addr_copy(rule->packet.dmac, pfvf->mac_addr); npc_read_mcam_entry(rvu, mcam, npcblkaddr, rule->entry, entry, &intf, &enable); npc_update_entry(rvu, NPC_DMAC, entry, ether_addr_to_u64(pfvf->mac_addr), 0, 0xffffffffffffull, 0, intf); write_req.hdr.pcifunc = rule->owner; write_req.entry = rule->entry; write_req.intf = pfvf->nix_rx_intf; mutex_unlock(&mcam->lock); err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req, &rsp); mutex_lock(&mcam->lock); return err; } void npc_mcam_enable_flows(struct rvu *rvu, u16 target) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, target); struct rvu_npc_mcam_rule *def_ucast_rule; struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_npc_mcam_rule *rule; int blkaddr, bank, index; u64 def_action; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return; def_ucast_rule = pfvf->def_ucast_rule; mutex_lock(&mcam->lock); list_for_each_entry(rule, &mcam->mcam_rules, list) { if (is_npc_intf_rx(rule->intf) && rule->rx_action.pf_func == target && !rule->enable) { if 
(rule->default_rule) { npc_enable_mcam_entry(rvu, mcam, blkaddr, rule->entry, true); rule->enable = true; continue; } if (rule->vfvlan_cfg) npc_update_dmac_value(rvu, blkaddr, rule, pfvf); if (rule->rx_action.op == NIX_RX_ACTION_DEFAULT) { if (!def_ucast_rule) continue; /* Use default unicast entry action */ rule->rx_action = def_ucast_rule->rx_action; def_action = *(u64 *)&def_ucast_rule->rx_action; bank = npc_get_bank(mcam, rule->entry); rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_ACTION (rule->entry, bank), def_action); } npc_enable_mcam_entry(rvu, mcam, blkaddr, rule->entry, true); rule->enable = true; } } /* Enable MCAM entries installed by PF with target as VF pcifunc */ for (index = 0; index < mcam->bmap_entries; index++) { if (mcam->entry2target_pffunc[index] == target) npc_enable_mcam_entry(rvu, mcam, blkaddr, index, true); } mutex_unlock(&mcam->lock); } void npc_mcam_disable_flows(struct rvu *rvu, u16 target) { struct npc_mcam *mcam = &rvu->hw->mcam; int blkaddr, index; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return; mutex_lock(&mcam->lock); /* Disable MCAM entries installed by PF with target as VF pcifunc */ for (index = 0; index < mcam->bmap_entries; index++) { if (mcam->entry2target_pffunc[index] == target) npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false); } mutex_unlock(&mcam->lock); } /* single drop on non hit rule starting from 0th index. This an extension * to RPM mac filter to support more rules. */ int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx, u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask, u64 bcast_mcast_val, u64 bcast_mcast_mask) { struct npc_mcam_alloc_counter_req cntr_req = { 0 }; struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 }; struct npc_mcam_write_entry_req req = { 0 }; struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_npc_mcam_rule *rule; struct msg_rsp rsp; bool enabled; int blkaddr; int err; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) { dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); return -ENODEV; } /* Bail out if no exact match support */ if (!rvu_npc_exact_has_match_table(rvu)) { dev_info(rvu->dev, "%s: No support for exact match feature\n", __func__); return -EINVAL; } /* If 0th entry is already used, return err */ enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_idx); if (enabled) { dev_err(rvu->dev, "%s: failed to add single drop on non hit rule at %d th index\n", __func__, mcam_idx); return -EINVAL; } /* Add this entry to mcam rules list */ rule = kzalloc(sizeof(*rule), GFP_KERNEL); if (!rule) return -ENOMEM; /* Disable rule by default. 
Enable rule when first dmac filter is * installed */ rule->enable = false; rule->chan = chan_val; rule->chan_mask = chan_mask; rule->entry = mcam_idx; rvu_mcam_add_rule(mcam, rule); /* Reserve slot 0 */ npc_mcam_rsrcs_reserve(rvu, blkaddr, mcam_idx); /* Allocate counter for this single drop on non hit rule */ cntr_req.hdr.pcifunc = 0; /* AF request */ cntr_req.contig = true; cntr_req.count = 1; err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp); if (err) { dev_err(rvu->dev, "%s: Err to allocate cntr for drop rule (err=%d)\n", __func__, err); return -EFAULT; } *counter_idx = cntr_rsp.cntr; /* Fill in fields for this mcam entry */ npc_update_entry(rvu, NPC_EXACT_RESULT, &req.entry_data, exact_val, 0, exact_mask, 0, NIX_INTF_RX); npc_update_entry(rvu, NPC_CHAN, &req.entry_data, chan_val, 0, chan_mask, 0, NIX_INTF_RX); npc_update_entry(rvu, NPC_LXMB, &req.entry_data, bcast_mcast_val, 0, bcast_mcast_mask, 0, NIX_INTF_RX); req.intf = NIX_INTF_RX; req.set_cntr = true; req.cntr = cntr_rsp.cntr; req.entry = mcam_idx; err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &req, &rsp); if (err) { dev_err(rvu->dev, "%s: Installation of single drop on non hit rule at %d failed\n", __func__, mcam_idx); return err; } dev_err(rvu->dev, "%s: Installed single drop on non hit rule at %d, cntr=%d\n", __func__, mcam_idx, req.cntr); /* disable entry at Bank 0, index 0 */ npc_enable_mcam_entry(rvu, mcam, blkaddr, mcam_idx, false); return 0; } int rvu_mbox_handler_npc_get_field_status(struct rvu *rvu, struct npc_get_field_status_req *req, struct npc_get_field_status_rsp *rsp) { int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; if (!is_npc_interface_valid(rvu, req->intf)) return NPC_FLOW_INTF_INVALID; if (npc_check_field(rvu, blkaddr, req->field, req->intf)) rsp->enable = 1; return 0; }
linux-master
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
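As a side note on the IPv6 handling in npc_update_ipv6_flow() above: the driver folds the 16-byte address into two 64-bit values (val_hi/val_lo) before programming the MCAM key words. The following is a minimal standalone userspace sketch, not part of the driver, that reproduces only that folding so the expected values in the driver comment can be checked; the helper name ipv6_to_mcam_words() is invented here for illustration.

/* Userspace sketch: fold an IPv6 address (network byte order) into the
 * val_hi/val_lo pair used when programming NPC MCAM key words.
 * For fe80::2c68:63ff:fe5e:2d0a this prints
 *   val_hi = 0xfe80000000000000
 *   val_lo = 0x2c6863fffe5e2d0a
 * matching the comment in npc_update_ipv6_flow().
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define IPV6_WORDS 4

static void ipv6_to_mcam_words(const uint8_t addr[16],
			       uint64_t *val_hi, uint64_t *val_lo)
{
	uint32_t w[IPV6_WORDS];
	int i;

	/* Mirrors be32_to_cpu_array(): each 32-bit word is big-endian on the wire */
	for (i = 0; i < IPV6_WORDS; i++) {
		uint32_t be;

		memcpy(&be, addr + 4 * i, sizeof(be));
		w[i] = ntohl(be);
	}

	*val_hi = (uint64_t)w[0] << 32 | w[1];	/* upper 64 bits of the address */
	*val_lo = (uint64_t)w[2] << 32 | w[3];	/* lower 64 bits of the address */
}

int main(void)
{
	/* fe80::2c68:63ff:fe5e:2d0a in network byte order */
	const uint8_t addr[16] = {
		0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x2c, 0x68, 0x63, 0xff, 0xfe, 0x5e, 0x2d, 0x0a,
	};
	uint64_t hi, lo;

	ipv6_to_mcam_words(addr, &hi, &lo);
	printf("val_hi = 0x%016llx\n", (unsigned long long)hi);
	printf("val_lo = 0x%016llx\n", (unsigned long long)lo);
	return 0;
}

The same hi/lo pair is then split across up to three MCAM key words by npc_update_entry(), depending on where the field's extracted bits start (the field->nr_kws cases seen earlier in the file).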
// SPDX-License-Identifier: GPL-2.0 /* Marvell MCS driver * * Copyright (C) 2022 Marvell. */ #include "mcs.h" #include "mcs_reg.h" static struct mcs_ops cnf10kb_mcs_ops = { .mcs_set_hw_capabilities = cnf10kb_mcs_set_hw_capabilities, .mcs_parser_cfg = cnf10kb_mcs_parser_cfg, .mcs_tx_sa_mem_map_write = cnf10kb_mcs_tx_sa_mem_map_write, .mcs_rx_sa_mem_map_write = cnf10kb_mcs_rx_sa_mem_map_write, .mcs_flowid_secy_map = cnf10kb_mcs_flowid_secy_map, .mcs_bbe_intr_handler = cnf10kb_mcs_bbe_intr_handler, .mcs_pab_intr_handler = cnf10kb_mcs_pab_intr_handler, }; struct mcs_ops *cnf10kb_get_mac_ops(void) { return &cnf10kb_mcs_ops; } void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs) { struct hwinfo *hw = mcs->hw; hw->tcam_entries = 64; /* TCAM entries */ hw->secy_entries = 64; /* SecY entries */ hw->sc_entries = 64; /* SC CAM entries */ hw->sa_entries = 128; /* SA entries */ hw->lmac_cnt = 4; /* lmacs/ports per mcs block */ hw->mcs_x2p_intf = 1; /* x2p clabration intf */ hw->mcs_blks = 7; /* MCS blocks */ hw->ip_vec = MCS_CNF10KB_INT_VEC_IP; /* IP vector */ } void cnf10kb_mcs_parser_cfg(struct mcs *mcs) { u64 reg, val; /* VLAN Ctag */ val = (0x8100ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(22); reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(0); mcs_reg_write(mcs, reg, val); reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(0); mcs_reg_write(mcs, reg, val); /* VLAN STag */ val = (0x88a8ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(23); /* RX */ reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(1); mcs_reg_write(mcs, reg, val); /* TX */ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(1); mcs_reg_write(mcs, reg, val); /* Enable custom tage 0 and 1 and sectag */ val = BIT_ULL(0) | BIT_ULL(1) | BIT_ULL(12); reg = MCSX_PEX_RX_SLAVE_ETYPE_ENABLE; mcs_reg_write(mcs, reg, val); reg = MCSX_PEX_TX_SLAVE_ETYPE_ENABLE; mcs_reg_write(mcs, reg, val); } void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir) { u64 reg, val; val = (map->secy & 0x3F) | (map->ctrl_pkt & 0x1) << 6; if (dir == MCS_RX) { reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id); } else { reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id); mcs_reg_write(mcs, reg, map->sci); val |= (map->sc & 0x3F) << 7; reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(map->flow_id); } mcs_reg_write(mcs, reg, val); } void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map) { u64 reg, val; val = (map->sa_index0 & 0x7F) | (map->sa_index1 & 0x7F) << 7; reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id); mcs_reg_write(mcs, reg, val); reg = MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0; val = mcs_reg_read(mcs, reg); if (map->rekey_ena) val |= BIT_ULL(map->sc_id); else val &= ~BIT_ULL(map->sc_id); mcs_reg_write(mcs, reg, val); mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(map->sc_id), map->sa_index0_vld); mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(map->sc_id), map->sa_index1_vld); mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(map->sc_id), map->tx_sa_active); } void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map) { u64 val, reg; val = (map->sa_index & 0x7F) | (map->sa_in_use << 7); reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an); mcs_reg_write(mcs, reg, val); } int mcs_set_force_clk_en(struct mcs *mcs, bool set) { unsigned long timeout = jiffies + usecs_to_jiffies(2000); u64 val; val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL); if (set) { val |= BIT_ULL(4); mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val); /* Poll till mcsx_mil_ip_gbl_status.mcs_ip_stats_ready value is 1 */ while (!(mcs_reg_read(mcs, MCSX_MIL_IP_GBL_STATUS) & BIT_ULL(0))) 
{ if (time_after(jiffies, timeout)) { dev_err(mcs->dev, "MCS set force clk enable failed\n"); break; } } } else { val &= ~BIT_ULL(4); mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val); } return 0; } /* TX SA interrupt is raised only if autorekey is enabled. * MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit gets toggled if * one of two SAs mapped to SC gets expired. If tx_sa_active=0 implies * SA in SA_index1 got expired else SA in SA_index0 got expired. */ void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs) { struct mcs_intr_event event; struct rsrc_bmap *sc_bmap; unsigned long rekey_ena; u64 val, sa_status; int sc; sc_bmap = &mcs->tx.sc; event.mcs_id = mcs->mcs_id; event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT; rekey_ena = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0); for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) { /* Auto rekey is enable */ if (!test_bit(sc, &rekey_ena)) continue; sa_status = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(sc)); /* Check if tx_sa_active status had changed */ if (sa_status == mcs->tx_sa_active[sc]) continue; /* SA_index0 is expired */ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc)); if (sa_status) event.sa_id = val & 0x7F; else event.sa_id = (val >> 7) & 0x7F; event.pcifunc = mcs->tx.sa2pf_map[event.sa_id]; mcs_add_intr_wq_entry(mcs, &event); } } void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs) { struct mcs_intr_event event = { 0 }; struct rsrc_bmap *sc_bmap; u64 val; int sc; sc_bmap = &mcs->tx.sc; event.mcs_id = mcs->mcs_id; event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT; for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) { val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc)); if (mcs->tx_sa_active[sc]) /* SA_index1 was used and got expired */ event.sa_id = (val >> 7) & 0x7F; else /* SA_index0 was used and got expired */ event.sa_id = val & 0x7F; event.pcifunc = mcs->tx.sa2pf_map[event.sa_id]; mcs_add_intr_wq_entry(mcs, &event); } } void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir) { struct mcs_intr_event event = { 0 }; int i; if (!(intr & MCS_BBE_INT_MASK)) return; event.mcs_id = mcs->mcs_id; event.pcifunc = mcs->pf_map[0]; for (i = 0; i < MCS_MAX_BBE_INT; i++) { if (!(intr & BIT_ULL(i))) continue; /* Lower nibble denotes data fifo overflow interrupts and * upper nibble indicates policy fifo overflow interrupts. */ if (intr & 0xFULL) event.intr_mask = (dir == MCS_RX) ? MCS_BBE_RX_DFIFO_OVERFLOW_INT : MCS_BBE_TX_DFIFO_OVERFLOW_INT; else event.intr_mask = (dir == MCS_RX) ? MCS_BBE_RX_PLFIFO_OVERFLOW_INT : MCS_BBE_TX_PLFIFO_OVERFLOW_INT; /* Notify the lmac_id info which ran into BBE fatal error */ event.lmac_id = i & 0x3ULL; mcs_add_intr_wq_entry(mcs, &event); } } void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir) { struct mcs_intr_event event = { 0 }; int i; if (!(intr & MCS_PAB_INT_MASK)) return; event.mcs_id = mcs->mcs_id; event.pcifunc = mcs->pf_map[0]; for (i = 0; i < MCS_MAX_PAB_INT; i++) { if (!(intr & BIT_ULL(i))) continue; event.intr_mask = (dir == MCS_RX) ? MCS_PAB_RX_CHAN_OVERFLOW_INT : MCS_PAB_TX_CHAN_OVERFLOW_INT; /* Notify the lmac_id info which ran into PAB fatal error */ event.lmac_id = i; mcs_add_intr_wq_entry(mcs, &event); } }
linux-master
drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
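To make the SECY map register layout in cnf10kb_mcs_flowid_secy_map() easier to follow, here is a small standalone userspace sketch, not driver code, that packs the same fields: SecY index in bits [5:0], the control-packet flag in bit 6 and, for the TX direction only, the SC index in bits [12:7]. The field widths are taken from the masks and shifts in the driver function above; the helper name mcs_secy_map_val() and the example indices are invented for illustration.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Pack the flow-id -> SecY map value the way the cnf10kb driver does */
static uint64_t mcs_secy_map_val(uint8_t secy, bool ctrl_pkt,
				 uint8_t sc, bool is_tx)
{
	uint64_t val;

	val = (uint64_t)(secy & 0x3F) | (uint64_t)(ctrl_pkt ? 1 : 0) << 6;
	if (is_tx)
		val |= (uint64_t)(sc & 0x3F) << 7;	/* SC only exists on TX */

	return val;
}

int main(void)
{
	/* Example only: SecY 5, control-packet flag clear, SC 9 on TX */
	printf("RX map val = 0x%llx\n",
	       (unsigned long long)mcs_secy_map_val(5, false, 0, false));
	printf("TX map val = 0x%llx\n",
	       (unsigned long long)mcs_secy_map_val(5, false, 9, true));
	return 0;
}

On RX the value goes into a single SECY_MAP_MEM register per flow id, while on TX the driver additionally writes the SCI into a second register, as the function above shows.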
// SPDX-License-Identifier: GPL-2.0 /* Marvell RPM CN10K driver * * Copyright (C) 2020 Marvell. */ #include <linux/bitfield.h> #include <linux/pci.h> #include "rvu.h" #include "cgx.h" #include "rvu_reg.h" /* RVU LMTST */ #define LMT_TBL_OP_READ 0 #define LMT_TBL_OP_WRITE 1 #define LMT_MAP_TABLE_SIZE (128 * 1024) #define LMT_MAPTBL_ENTRY_SIZE 16 /* Function to perform operations (read/write) on lmtst map table */ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, int lmt_tbl_op) { void __iomem *lmt_map_base; u64 tbl_base; tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE); if (!lmt_map_base) { dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); return -ENOMEM; } if (lmt_tbl_op == LMT_TBL_OP_READ) { *val = readq(lmt_map_base + index); } else { writeq((*val), (lmt_map_base + index)); /* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S * changes effective. Write 1 for flush and read is being used as a * barrier and sets up a data dependency. Write to 0 after a write * to 1 to complete the flush. */ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0)); rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL); rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00); } iounmap(lmt_map_base); return 0; } #define LMT_MAP_TBL_W1_OFF 8 static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) { return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) + (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE; } static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 iova, u64 *lmt_addr) { u64 pa, val, pf; int err = 0; if (!iova) { dev_err(rvu->dev, "%s Requested Null address for transulation\n", __func__); return -EINVAL; } mutex_lock(&rvu->rsrc_lock); rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova); pf = rvu_get_pf(pcifunc) & 0x1F; val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 | ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF); rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val); err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false); if (err) { dev_err(rvu->dev, "%s LMTLINE iova transulation failed\n", __func__); goto exit; } val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS); if (val & ~0x1ULL) { dev_err(rvu->dev, "%s LMTLINE iova transulation failed err:%llx\n", __func__, val); err = -EIO; goto exit; } /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18] * PA[11:0] = IOVA[11:0] */ pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18; pa &= GENMASK_ULL(39, 0); *lmt_addr = (pa << 12) | (iova & 0xFFF); exit: mutex_unlock(&rvu->rsrc_lock); return err; } static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); u32 tbl_idx; int err = 0; u64 val; /* Read the current lmt addr of pcifunc */ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc); err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ); if (err) { dev_err(rvu->dev, "Failed to read LMT map table: index 0x%x err %d\n", tbl_idx, err); return err; } /* Storing the seondary's lmt base address as this needs to be * reverted in FLR. Also making sure this default value doesn't * get overwritten on multiple calls to this mailbox. 
*/ if (!pfvf->lmt_base_addr) pfvf->lmt_base_addr = val; /* Update the LMT table with new addr */ err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE); if (err) { dev_err(rvu->dev, "Failed to update LMT map table: index 0x%x err %d\n", tbl_idx, err); return err; } return 0; } int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu, struct lmtst_tbl_setup_req *req, struct msg_rsp *rsp) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); u32 pri_tbl_idx, tbl_idx; u64 lmt_addr; int err = 0; u64 val; /* Check if PF_FUNC wants to use it's own local memory as LMTLINE * region, if so, convert that IOVA to physical address and * populate LMT table with that address */ if (req->use_local_lmt_region) { err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc, req->lmt_iova, &lmt_addr); if (err < 0) return err; /* Update the lmt addr for this PFFUNC in the LMT table */ err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr); if (err) return err; } /* Reconfiguring lmtst map table in lmt region shared mode i.e. make * multiple PF_FUNCs to share an LMTLINE region, so primary/base * pcifunc (which is passed as an argument to mailbox) is the one * whose lmt base address will be shared among other secondary * pcifunc (will be the one who is calling this mailbox). */ if (req->base_pcifunc) { /* Calculating the LMT table index equivalent to primary * pcifunc. */ pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc); /* Read the base lmt addr of the primary pcifunc */ err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val, LMT_TBL_OP_READ); if (err) { dev_err(rvu->dev, "Failed to read LMT map table: index 0x%x err %d\n", pri_tbl_idx, err); goto error; } /* Update the base lmt addr of secondary with primary's base * lmt addr. */ err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val); if (err) return err; } /* This mailbox can also be used to update word1 of APR_LMT_MAP_ENTRY_S * like enabling scheduled LMTST, disable LMTLINE prefetch, disable * early completion for ordered LMTST. */ if (req->sch_ena || req->dis_sched_early_comp || req->dis_line_pref) { tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->hdr.pcifunc); err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF, &val, LMT_TBL_OP_READ); if (err) { dev_err(rvu->dev, "Failed to read LMT map table: index 0x%x err %d\n", tbl_idx + LMT_MAP_TBL_W1_OFF, err); goto error; } /* Storing lmt map table entry word1 default value as this needs * to be reverted in FLR. Also making sure this default value * doesn't get overwritten on multiple calls to this mailbox. */ if (!pfvf->lmt_map_ent_w1) pfvf->lmt_map_ent_w1 = val; /* Disable early completion for Ordered LMTSTs. */ if (req->dis_sched_early_comp) val |= (req->dis_sched_early_comp << APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT); /* Enable scheduled LMTST */ if (req->sch_ena) val |= (req->sch_ena << APR_LMT_MAP_ENT_SCH_ENA_SHIFT) | req->ssow_pf_func; /* Disables LMTLINE prefetch before receiving store data. 
*/ if (req->dis_line_pref) val |= (req->dis_line_pref << APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT); err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF, &val, LMT_TBL_OP_WRITE); if (err) { dev_err(rvu->dev, "Failed to update LMT map table: index 0x%x err %d\n", tbl_idx + LMT_MAP_TBL_W1_OFF, err); goto error; } } error: return err; } /* Resetting the lmtst map table to original base addresses */ void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); u32 tbl_idx; int err; if (is_rvu_otx2(rvu)) return; if (pfvf->lmt_base_addr || pfvf->lmt_map_ent_w1) { /* This corresponds to lmt map table index */ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc); /* Reverting back original lmt base addr for respective * pcifunc. */ if (pfvf->lmt_base_addr) { err = lmtst_map_table_ops(rvu, tbl_idx, &pfvf->lmt_base_addr, LMT_TBL_OP_WRITE); if (err) dev_err(rvu->dev, "Failed to update LMT map table: index 0x%x err %d\n", tbl_idx, err); pfvf->lmt_base_addr = 0; } /* Reverting back to orginal word1 val of lmtst map table entry * which underwent changes. */ if (pfvf->lmt_map_ent_w1) { err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF, &pfvf->lmt_map_ent_w1, LMT_TBL_OP_WRITE); if (err) dev_err(rvu->dev, "Failed to update LMT map table: index 0x%x err %d\n", tbl_idx + LMT_MAP_TBL_W1_OFF, err); pfvf->lmt_map_ent_w1 = 0; } } } int rvu_set_channels_base(struct rvu *rvu) { u16 nr_lbk_chans, nr_sdp_chans, nr_cgx_chans, nr_cpt_chans; u16 sdp_chan_base, cgx_chan_base, cpt_chan_base; struct rvu_hwinfo *hw = rvu->hw; u64 nix_const, nix_const1; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); if (blkaddr < 0) return blkaddr; nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST); nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); hw->cgx = (nix_const >> 12) & 0xFULL; hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL; hw->cgx_links = hw->cgx * hw->lmac_per_cgx; hw->lbk_links = (nix_const >> 24) & 0xFULL; hw->cpt_links = (nix_const >> 44) & 0xFULL; hw->sdp_links = 1; hw->cgx_chan_base = NIX_CHAN_CGX_LMAC_CHX(0, 0, 0); hw->lbk_chan_base = NIX_CHAN_LBK_CHX(0, 0); hw->sdp_chan_base = NIX_CHAN_SDP_CH_START; /* No Programmable channels */ if (!(nix_const & BIT_ULL(60))) return 0; hw->cap.programmable_chans = true; /* If programmable channels are present then configure * channels such that all channel numbers are contiguous * leaving no holes. This way the new CPT channels can be * accomodated. The order of channel numbers assigned is * LBK, SDP, CGX and CPT. Also the base channel number * of a block must be multiple of number of channels * of the block. 
*/ nr_lbk_chans = (nix_const >> 16) & 0xFFULL; nr_sdp_chans = nix_const1 & 0xFFFULL; nr_cgx_chans = nix_const & 0xFFULL; nr_cpt_chans = (nix_const >> 32) & 0xFFFULL; sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * nr_lbk_chans; /* Round up base channel to multiple of number of channels */ hw->sdp_chan_base = ALIGN(sdp_chan_base, nr_sdp_chans); cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * nr_sdp_chans; hw->cgx_chan_base = ALIGN(cgx_chan_base, nr_cgx_chans); cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * nr_cgx_chans; hw->cpt_chan_base = ALIGN(cpt_chan_base, nr_cpt_chans); /* Out of 4096 channels start CPT from 2048 so * that MSB for CPT channels is always set */ if (cpt_chan_base <= NIX_CHAN_CPT_CH_START) { hw->cpt_chan_base = NIX_CHAN_CPT_CH_START; } else { dev_err(rvu->dev, "CPT channels could not fit in the range 2048-4095\n"); return -EINVAL; } return 0; } #define LBK_CONNECT_NIXX(a) (0x0 + (a)) static void __rvu_lbk_set_chans(struct rvu *rvu, void __iomem *base, u64 offset, int lbkid, u16 chans) { struct rvu_hwinfo *hw = rvu->hw; u64 cfg; cfg = readq(base + offset); cfg &= ~(LBK_LINK_CFG_RANGE_MASK | LBK_LINK_CFG_ID_MASK | LBK_LINK_CFG_BASE_MASK); cfg |= FIELD_PREP(LBK_LINK_CFG_RANGE_MASK, ilog2(chans)); cfg |= FIELD_PREP(LBK_LINK_CFG_ID_MASK, lbkid); cfg |= FIELD_PREP(LBK_LINK_CFG_BASE_MASK, hw->lbk_chan_base); writeq(cfg, base + offset); } static void rvu_lbk_set_channels(struct rvu *rvu) { struct pci_dev *pdev = NULL; void __iomem *base; u64 lbk_const; u8 src, dst; u16 chans; /* To loopback packets between multiple NIX blocks * mutliple LBK blocks are needed. With two NIX blocks, * four LBK blocks are needed and each LBK block * source and destination are as follows: * LBK0 - source NIX0 and destination NIX1 * LBK1 - source NIX0 and destination NIX1 * LBK2 - source NIX1 and destination NIX0 * LBK3 - source NIX1 and destination NIX1 * As per the HRM channel numbers should be programmed as: * P2X and X2P of LBK0 as same * P2X and X2P of LBK3 as same * P2X of LBK1 and X2P of LBK2 as same * P2X of LBK2 and X2P of LBK1 as same */ while (true) { pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK, pdev); if (!pdev) return; base = pci_ioremap_bar(pdev, 0); if (!base) goto err_put; lbk_const = readq(base + LBK_CONST); chans = FIELD_GET(LBK_CONST_CHANS, lbk_const); dst = FIELD_GET(LBK_CONST_DST, lbk_const); src = FIELD_GET(LBK_CONST_SRC, lbk_const); if (src == dst) { if (src == LBK_CONNECT_NIXX(0)) { /* LBK0 */ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P, 0, chans); __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X, 0, chans); } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK3 */ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P, 1, chans); __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X, 1, chans); } } else { if (src == LBK_CONNECT_NIXX(0)) { /* LBK1 */ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P, 0, chans); __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X, 1, chans); } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK2 */ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P, 1, chans); __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X, 0, chans); } } iounmap(base); } err_put: pci_dev_put(pdev); } static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr) { u64 nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST); u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans; struct rvu_hwinfo *hw = rvu->hw; int link, nix_link = 0; u16 start; u64 cfg; cgx_chans = nix_const & 0xFFULL; lbk_chans = (nix_const >> 16) & 
0xFFULL; sdp_chans = nix_const1 & 0xFFFULL; cpt_chans = (nix_const >> 32) & 0xFFFULL; start = hw->cgx_chan_base; for (link = 0; link < hw->cgx_links; link++, nix_link++) { cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link)); cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK); cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cgx_chans)); cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start); rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg); start += cgx_chans; } start = hw->lbk_chan_base; for (link = 0; link < hw->lbk_links; link++, nix_link++) { cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link)); cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK); cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(lbk_chans)); cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start); rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg); start += lbk_chans; } start = hw->sdp_chan_base; for (link = 0; link < hw->sdp_links; link++, nix_link++) { cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link)); cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK); cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(sdp_chans)); cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start); rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg); start += sdp_chans; } start = hw->cpt_chan_base; for (link = 0; link < hw->cpt_links; link++, nix_link++) { cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link)); cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK); cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cpt_chans)); cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start); rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg); start += cpt_chans; } } static void rvu_nix_set_channels(struct rvu *rvu) { int blkaddr = 0; blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); while (blkaddr) { __rvu_nix_set_channels(rvu, blkaddr); blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); } } static void __rvu_rpm_set_channels(int cgxid, int lmacid, u16 base) { u64 cfg; cfg = cgx_lmac_read(cgxid, lmacid, RPMX_CMRX_LINK_CFG); cfg &= ~(RPMX_CMRX_LINK_BASE_MASK | RPMX_CMRX_LINK_RANGE_MASK); /* There is no read-only constant register to read * the number of channels for LMAC and it is always 16. */ cfg |= FIELD_PREP(RPMX_CMRX_LINK_RANGE_MASK, ilog2(16)); cfg |= FIELD_PREP(RPMX_CMRX_LINK_BASE_MASK, base); cgx_lmac_write(cgxid, lmacid, RPMX_CMRX_LINK_CFG, cfg); } static void rvu_rpm_set_channels(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; u16 base = hw->cgx_chan_base; int cgx, lmac; for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) { for (lmac = 0; lmac < hw->lmac_per_cgx; lmac++) { __rvu_rpm_set_channels(cgx, lmac, base); base += 16; } } } void rvu_program_channels(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; if (!hw->cap.programmable_chans) return; rvu_nix_set_channels(rvu); rvu_lbk_set_channels(rvu); rvu_rpm_set_channels(rvu); } void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw) { int blkaddr = nix_hw->blkaddr; u64 cfg; /* Set AF vWQE timer interval to a LF configurable range of * 6.4us to 1.632ms. */ rvu_write64(rvu, blkaddr, NIX_AF_VWQE_TIMER, 0x3FULL); /* Enable NIX RX stream and global conditional clock to * avoild multiple free of NPA buffers. */ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CFG); cfg |= BIT_ULL(1) | BIT_ULL(2); rvu_write64(rvu, blkaddr, NIX_AF_CFG, cfg); }
linux-master
drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
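The contiguous channel layout built by rvu_set_channels_base() can be hard to picture from the register reads alone. Below is a minimal userspace sketch, not driver code, of just the base computation for the LBK -> SDP -> CGX ordering (CPT follows the same pattern): each block's base is the previous block's end rounded up to a multiple of that block's channel count, which is what the kernel's ALIGN() macro does. The link and channel counts used here are made-up example values, not the ones read from NIX_AF_CONST/NIX_AF_CONST1, and align_up() is a local stand-in assuming power-of-two channel counts.

#include <stdio.h>
#include <stdint.h>

/* Round x up to the next multiple of a (a assumed to be a power of two) */
static uint16_t align_up(uint16_t x, uint16_t a)
{
	return (x + a - 1) & ~(uint16_t)(a - 1);
}

int main(void)
{
	/* Hypothetical example: 4 LBK links x 64 chans, 1 SDP link x 256 chans,
	 * 8 CGX links x 16 chans.
	 */
	uint16_t lbk_chan_base = 0, lbk_links = 4, nr_lbk_chans = 64;
	uint16_t sdp_links = 1, nr_sdp_chans = 256;
	uint16_t cgx_links = 8, nr_cgx_chans = 16;
	uint16_t sdp_chan_base, cgx_chan_base;

	sdp_chan_base = align_up(lbk_chan_base + lbk_links * nr_lbk_chans,
				 nr_sdp_chans);
	cgx_chan_base = align_up(sdp_chan_base + sdp_links * nr_sdp_chans,
				 nr_cgx_chans);

	printf("LBK base = %u\n", lbk_chan_base);	/* 0   */
	printf("SDP base = %u\n", sdp_chan_base);	/* 256 */
	printf("CGX base = %u\n", cgx_chan_base);	/* 512 */
	return 0;
}

Because every base is a multiple of its block's channel count, each block's channels can later be described to NIX as a base plus a power-of-two range (the ilog2() writes in __rvu_nix_set_channels()).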
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Admin Function driver * * Copyright (C) 2018 Marvell. * */ #include <linux/bitfield.h> #include <linux/module.h> #include <linux/pci.h> #include "rvu_struct.h" #include "rvu_reg.h" #include "rvu.h" #include "npc.h" #include "cgx.h" #include "npc_profile.h" #include "rvu_npc_hash.h" #define RSVD_MCAM_ENTRIES_PER_PF 3 /* Broadcast, Promisc and AllMulticast */ #define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */ #define NPC_PARSE_RESULT_DMAC_OFFSET 8 #define NPC_HW_TSTAMP_OFFSET 8ULL #define NPC_KEX_CHAN_MASK 0xFFFULL #define NPC_KEX_PF_FUNC_MASK 0xFFFFULL #define ALIGN_8B_CEIL(__a) (((__a) + 7) & (-8)) static const char def_pfl_name[] = "default"; static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 pcifunc); static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam, u16 pcifunc); bool is_npc_intf_tx(u8 intf) { return !!(intf & 0x1); } bool is_npc_intf_rx(u8 intf) { return !(intf & 0x1); } bool is_npc_interface_valid(struct rvu *rvu, u8 intf) { struct rvu_hwinfo *hw = rvu->hw; return intf < hw->npc_intfs; } int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena) { /* Due to a HW issue in these silicon versions, parse nibble enable * configuration has to be identical for both Rx and Tx interfaces. */ if (is_rvu_96xx_B0(rvu)) return nibble_ena; return 0; } static int npc_mcam_verify_pf_func(struct rvu *rvu, struct mcam_entry *entry_data, u8 intf, u16 pcifunc) { u16 pf_func, pf_func_mask; if (is_npc_intf_rx(intf)) return 0; pf_func_mask = (entry_data->kw_mask[0] >> 32) & NPC_KEX_PF_FUNC_MASK; pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK; pf_func = be16_to_cpu((__force __be16)pf_func); if (pf_func_mask != NPC_KEX_PF_FUNC_MASK || ((pf_func & ~RVU_PFVF_FUNC_MASK) != (pcifunc & ~RVU_PFVF_FUNC_MASK))) return -EINVAL; return 0; } void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf) { int blkaddr; u64 val = 0; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return; /* Config CPI base for the PKIND */ val = pkind | 1ULL << 62; rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_CPI_DEFX(pkind, 0), val); } int rvu_npc_get_pkind(struct rvu *rvu, u16 pf) { struct npc_pkind *pkind = &rvu->hw->pkind; u32 map; int i; for (i = 0; i < pkind->rsrc.max; i++) { map = pkind->pfchan_map[i]; if (((map >> 16) & 0x3F) == pf) return i; } return -1; } #define NPC_AF_ACTION0_PTR_ADVANCE GENMASK_ULL(27, 20) int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool enable) { int pkind, blkaddr; u64 val; pkind = rvu_npc_get_pkind(rvu, pf); if (pkind < 0) { dev_err(rvu->dev, "%s: pkind not mapped\n", __func__); return -EINVAL; } blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc); if (blkaddr < 0) { dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); return -EINVAL; } val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind)); val &= ~NPC_AF_ACTION0_PTR_ADVANCE; /* If timestamp is enabled then configure NPC to shift 8 bytes */ if (enable) val |= FIELD_PREP(NPC_AF_ACTION0_PTR_ADVANCE, NPC_HW_TSTAMP_OFFSET); rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val); return 0; } static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf) { struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam); struct rvu *rvu = hw->rvu; int blkaddr = 0, max = 0; struct rvu_block *block; struct rvu_pfvf *pfvf; pfvf = rvu_get_pfvf(rvu, pcifunc); /* Given a PF/VF and NIX LF number calculate the unicast mcam * entry 
index based on the NIX block assigned to the PF/VF. */ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); while (blkaddr) { if (pfvf->nix_blkaddr == blkaddr) break; block = &rvu->hw->block[blkaddr]; max += block->lf.max; blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); } return mcam->nixlf_offset + (max + nixlf) * RSVD_MCAM_ENTRIES_PER_NIXLF; } int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf, int type) { int pf = rvu_get_pf(pcifunc); int index; /* Check if this is for a PF */ if (pf && !(pcifunc & RVU_PFVF_FUNC_MASK)) { /* Reserved entries exclude PF0 */ pf--; index = mcam->pf_offset + (pf * RSVD_MCAM_ENTRIES_PER_PF); /* Broadcast address matching entry should be first so * that the packet can be replicated to all VFs. */ if (type == NIXLF_BCAST_ENTRY) return index; else if (type == NIXLF_ALLMULTI_ENTRY) return index + 1; else if (type == NIXLF_PROMISC_ENTRY) return index + 2; } return npc_get_ucast_mcam_index(mcam, pcifunc, nixlf); } int npc_get_bank(struct npc_mcam *mcam, int index) { int bank = index / mcam->banksize; /* 0,1 & 2,3 banks are combined for this keysize */ if (mcam->keysize == NPC_MCAM_KEY_X2) return bank ? 2 : 0; return bank; } bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, int index) { int bank = npc_get_bank(mcam, index); u64 cfg; index &= (mcam->banksize - 1); cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(index, bank)); return (cfg & 1); } void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, int index, bool enable) { int bank = npc_get_bank(mcam, index); int actbank = bank; index &= (mcam->banksize - 1); for (; bank < (actbank + mcam->banks_per_entry); bank++) { rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(index, bank), enable ? 1 : 0); } } static void npc_clear_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, int index) { int bank = npc_get_bank(mcam, index); int actbank = bank; index &= (mcam->banksize - 1); for (; bank < (actbank + mcam->banks_per_entry); bank++) { rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), 0); rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), 0); rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), 0); rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), 0); rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), 0); rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), 0); } } static void npc_get_keyword(struct mcam_entry *entry, int idx, u64 *cam0, u64 *cam1) { u64 kw_mask = 0x00; #define CAM_MASK(n) (BIT_ULL(n) - 1) /* 0, 2, 4, 6 indices refer to BANKX_CAMX_W0 and * 1, 3, 5, 7 indices refer to BANKX_CAMX_W1. * * Also, only 48 bits of BANKX_CAMX_W1 are valid. 
*/ switch (idx) { case 0: /* BANK(X)_CAM_W0<63:0> = MCAM_KEY[KW0]<63:0> */ *cam1 = entry->kw[0]; kw_mask = entry->kw_mask[0]; break; case 1: /* BANK(X)_CAM_W1<47:0> = MCAM_KEY[KW1]<47:0> */ *cam1 = entry->kw[1] & CAM_MASK(48); kw_mask = entry->kw_mask[1] & CAM_MASK(48); break; case 2: /* BANK(X + 1)_CAM_W0<15:0> = MCAM_KEY[KW1]<63:48> * BANK(X + 1)_CAM_W0<63:16> = MCAM_KEY[KW2]<47:0> */ *cam1 = (entry->kw[1] >> 48) & CAM_MASK(16); *cam1 |= ((entry->kw[2] & CAM_MASK(48)) << 16); kw_mask = (entry->kw_mask[1] >> 48) & CAM_MASK(16); kw_mask |= ((entry->kw_mask[2] & CAM_MASK(48)) << 16); break; case 3: /* BANK(X + 1)_CAM_W1<15:0> = MCAM_KEY[KW2]<63:48> * BANK(X + 1)_CAM_W1<47:16> = MCAM_KEY[KW3]<31:0> */ *cam1 = (entry->kw[2] >> 48) & CAM_MASK(16); *cam1 |= ((entry->kw[3] & CAM_MASK(32)) << 16); kw_mask = (entry->kw_mask[2] >> 48) & CAM_MASK(16); kw_mask |= ((entry->kw_mask[3] & CAM_MASK(32)) << 16); break; case 4: /* BANK(X + 2)_CAM_W0<31:0> = MCAM_KEY[KW3]<63:32> * BANK(X + 2)_CAM_W0<63:32> = MCAM_KEY[KW4]<31:0> */ *cam1 = (entry->kw[3] >> 32) & CAM_MASK(32); *cam1 |= ((entry->kw[4] & CAM_MASK(32)) << 32); kw_mask = (entry->kw_mask[3] >> 32) & CAM_MASK(32); kw_mask |= ((entry->kw_mask[4] & CAM_MASK(32)) << 32); break; case 5: /* BANK(X + 2)_CAM_W1<31:0> = MCAM_KEY[KW4]<63:32> * BANK(X + 2)_CAM_W1<47:32> = MCAM_KEY[KW5]<15:0> */ *cam1 = (entry->kw[4] >> 32) & CAM_MASK(32); *cam1 |= ((entry->kw[5] & CAM_MASK(16)) << 32); kw_mask = (entry->kw_mask[4] >> 32) & CAM_MASK(32); kw_mask |= ((entry->kw_mask[5] & CAM_MASK(16)) << 32); break; case 6: /* BANK(X + 3)_CAM_W0<47:0> = MCAM_KEY[KW5]<63:16> * BANK(X + 3)_CAM_W0<63:48> = MCAM_KEY[KW6]<15:0> */ *cam1 = (entry->kw[5] >> 16) & CAM_MASK(48); *cam1 |= ((entry->kw[6] & CAM_MASK(16)) << 48); kw_mask = (entry->kw_mask[5] >> 16) & CAM_MASK(48); kw_mask |= ((entry->kw_mask[6] & CAM_MASK(16)) << 48); break; case 7: /* BANK(X + 3)_CAM_W1<47:0> = MCAM_KEY[KW6]<63:16> */ *cam1 = (entry->kw[6] >> 16) & CAM_MASK(48); kw_mask = (entry->kw_mask[6] >> 16) & CAM_MASK(48); break; } *cam1 &= kw_mask; *cam0 = ~*cam1 & kw_mask; } static void npc_fill_entryword(struct mcam_entry *entry, int idx, u64 cam0, u64 cam1) { /* Similar to npc_get_keyword, but fills mcam_entry structure from * CAM registers. 
*/ switch (idx) { case 0: entry->kw[0] = cam1; entry->kw_mask[0] = cam1 ^ cam0; break; case 1: entry->kw[1] = cam1; entry->kw_mask[1] = cam1 ^ cam0; break; case 2: entry->kw[1] |= (cam1 & CAM_MASK(16)) << 48; entry->kw[2] = (cam1 >> 16) & CAM_MASK(48); entry->kw_mask[1] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48; entry->kw_mask[2] = ((cam1 ^ cam0) >> 16) & CAM_MASK(48); break; case 3: entry->kw[2] |= (cam1 & CAM_MASK(16)) << 48; entry->kw[3] = (cam1 >> 16) & CAM_MASK(32); entry->kw_mask[2] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48; entry->kw_mask[3] = ((cam1 ^ cam0) >> 16) & CAM_MASK(32); break; case 4: entry->kw[3] |= (cam1 & CAM_MASK(32)) << 32; entry->kw[4] = (cam1 >> 32) & CAM_MASK(32); entry->kw_mask[3] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32; entry->kw_mask[4] = ((cam1 ^ cam0) >> 32) & CAM_MASK(32); break; case 5: entry->kw[4] |= (cam1 & CAM_MASK(32)) << 32; entry->kw[5] = (cam1 >> 32) & CAM_MASK(16); entry->kw_mask[4] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32; entry->kw_mask[5] = ((cam1 ^ cam0) >> 32) & CAM_MASK(16); break; case 6: entry->kw[5] |= (cam1 & CAM_MASK(48)) << 16; entry->kw[6] = (cam1 >> 48) & CAM_MASK(16); entry->kw_mask[5] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16; entry->kw_mask[6] = ((cam1 ^ cam0) >> 48) & CAM_MASK(16); break; case 7: entry->kw[6] |= (cam1 & CAM_MASK(48)) << 16; entry->kw_mask[6] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16; break; } } static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 pf_func) { int bank, nixlf, index; /* get ucast entry rule entry index */ nix_get_nixlf(rvu, pf_func, &nixlf, NULL); index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf, NIXLF_UCAST_ENTRY); bank = npc_get_bank(mcam, index); index &= (mcam->banksize - 1); return rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_ACTION(index, bank)); } static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, int index, struct mcam_entry *entry, bool *enable) { struct rvu_npc_mcam_rule *rule; u16 owner, target_func; struct rvu_pfvf *pfvf; u64 rx_action; owner = mcam->entry2pfvf_map[index]; target_func = (entry->action >> 4) & 0xffff; /* do nothing when target is LBK/PF or owner is not PF */ if (is_pffunc_af(owner) || is_afvf(target_func) || (owner & RVU_PFVF_FUNC_MASK) || !(target_func & RVU_PFVF_FUNC_MASK)) return; /* save entry2target_pffunc */ pfvf = rvu_get_pfvf(rvu, target_func); mcam->entry2target_pffunc[index] = target_func; /* don't enable rule when nixlf not attached or initialized */ if (!(is_nixlf_attached(rvu, target_func) && test_bit(NIXLF_INITIALIZED, &pfvf->flags))) *enable = false; /* fix up not needed for the rules added by user(ntuple filters) */ list_for_each_entry(rule, &mcam->mcam_rules, list) { if (rule->entry == index) return; } /* copy VF default entry action to the VF mcam entry */ rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr, target_func); if (rx_action) entry->action = rx_action; } static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, int index, u8 intf, struct mcam_entry *entry, bool enable) { int bank = npc_get_bank(mcam, index); int kw = 0, actbank, actindex; u8 tx_intf_mask = ~intf & 0x3; u8 tx_intf = intf; u64 cam0, cam1; actbank = bank; /* Save bank id, to set action later on */ actindex = index; index &= (mcam->banksize - 1); /* Disable before mcam entry update */ npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false); /* Clear mcam entry to avoid writes being suppressed by NPC */ npc_clear_mcam_entry(rvu, mcam, blkaddr, actindex); /* CAM1 takes the 
comparison value and * CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'. * CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0 * CAM1<n> = 1 & CAM0<n> = 0 => match if key<n> = 1 * CAM1<n> = 0 & CAM0<n> = 0 => always match i.e dontcare. */ for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) { /* Interface should be set in all banks */ if (is_npc_intf_tx(intf)) { /* Last bit must be set and rest don't care * for TX interfaces */ tx_intf_mask = 0x1; tx_intf = intf & tx_intf_mask; tx_intf_mask = ~tx_intf & tx_intf_mask; } rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), tx_intf); rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), tx_intf_mask); /* Set the match key */ npc_get_keyword(entry, kw, &cam0, &cam1); rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), cam1); rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), cam0); npc_get_keyword(entry, kw + 1, &cam0, &cam1); rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), cam1); rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0); } /* PF installing VF rule */ if (is_npc_intf_rx(intf) && actindex < mcam->bmap_entries) npc_fixup_vf_rule(rvu, mcam, blkaddr, actindex, entry, &enable); /* Set 'action' */ rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_ACTION(index, actbank), entry->action); /* Set TAG 'action' */ rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_TAG_ACT(index, actbank), entry->vtag_action); /* Enable the entry */ if (enable) npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true); } void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 src, struct mcam_entry *entry, u8 *intf, u8 *ena) { int sbank = npc_get_bank(mcam, src); int bank, kw = 0; u64 cam0, cam1; src &= (mcam->banksize - 1); bank = sbank; for (; bank < (sbank + mcam->banks_per_entry); bank++, kw = kw + 2) { cam1 = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 1)); cam0 = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 0)); npc_fill_entryword(entry, kw, cam0, cam1); cam1 = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 1)); cam0 = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 0)); npc_fill_entryword(entry, kw + 1, cam0, cam1); } entry->action = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_ACTION(src, sbank)); entry->vtag_action = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank)); *intf = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank, 1)) & 3; *ena = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(src, sbank)) & 1; } static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 src, u16 dest) { int dbank = npc_get_bank(mcam, dest); int sbank = npc_get_bank(mcam, src); u64 cfg, sreg, dreg; int bank, i; src &= (mcam->banksize - 1); dest &= (mcam->banksize - 1); /* Copy INTF's, W0's, W1's CAM0 and CAM1 configuration */ for (bank = 0; bank < mcam->banks_per_entry; bank++) { sreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank + bank, 0); dreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(dest, dbank + bank, 0); for (i = 0; i < 6; i++) { cfg = rvu_read64(rvu, blkaddr, sreg + (i * 8)); rvu_write64(rvu, blkaddr, dreg + (i * 8), cfg); } } /* Copy action */ cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_ACTION(src, sbank)); rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_ACTION(dest, dbank), cfg); /* Copy TAG action */ cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_TAG_ACT(src, 
sbank)); rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_TAG_ACT(dest, dbank), cfg); /* Enable or disable */ cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(src, sbank)); rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg); } static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, int index) { int bank = npc_get_bank(mcam, index); index &= (mcam->banksize - 1); return rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_ACTION(index, bank)); } void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan, u8 *mac_addr) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct npc_install_flow_req req = { 0 }; struct npc_install_flow_rsp rsp = { 0 }; struct npc_mcam *mcam = &rvu->hw->mcam; struct nix_rx_action action = { 0 }; int blkaddr, index; /* AF's and SDP VFs work in promiscuous mode */ if (is_afvf(pcifunc) || is_sdp_vf(pcifunc)) return; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return; /* Ucast rule should not be installed if DMAC * extraction is not supported by the profile. */ if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf)) return; index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_UCAST_ENTRY); /* Don't change the action if entry is already enabled * Otherwise RSS action may get overwritten. */ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) { *(u64 *)&action = npc_get_mcam_action(rvu, mcam, blkaddr, index); } else { action.op = NIX_RX_ACTIONOP_UCAST; action.pf_func = pcifunc; } req.default_rule = 1; ether_addr_copy(req.packet.dmac, mac_addr); eth_broadcast_addr((u8 *)&req.mask.dmac); req.features = BIT_ULL(NPC_DMAC); req.channel = chan; req.chan_mask = 0xFFFU; req.intf = pfvf->nix_rx_intf; req.op = action.op; req.hdr.pcifunc = 0; /* AF is requester */ req.vf = action.pf_func; req.index = action.index; req.match_id = action.match_id; req.flow_key_alg = action.flow_key_alg; rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan, u8 chan_cnt) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct npc_install_flow_req req = { 0 }; struct npc_install_flow_rsp rsp = { 0 }; struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_hwinfo *hw = rvu->hw; int blkaddr, ucast_idx, index; struct nix_rx_action action = { 0 }; u64 relaxed_mask; if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc)) return; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return; index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_PROMISC_ENTRY); if (is_cgx_vf(rvu, pcifunc)) index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK, nixlf, NIXLF_PROMISC_ENTRY); /* If the corresponding PF's ucast action is RSS, * use the same action for promisc also */ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_UCAST_ENTRY); if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx)) *(u64 *)&action = npc_get_mcam_action(rvu, mcam, blkaddr, ucast_idx); if (action.op != NIX_RX_ACTIONOP_RSS) { *(u64 *)&action = 0; action.op = NIX_RX_ACTIONOP_UCAST; } /* RX_ACTION set to MCAST for CGX PF's */ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list && is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { *(u64 *)&action = 0; action.op = NIX_RX_ACTIONOP_MCAST; pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); action.index = pfvf->promisc_mce_idx; } /* For cn10k the upper two bits of the channel number are * cpt channel number. 
with masking out these bits in the * mcam entry, same entry used for NIX will allow packets * received from cpt for parsing. */ if (!is_rvu_otx2(rvu)) { req.chan_mask = NIX_CHAN_CPT_X2P_MASK; } else { req.chan_mask = 0xFFFU; } if (chan_cnt > 1) { if (!is_power_of_2(chan_cnt)) { dev_err(rvu->dev, "%s: channel count more than 1, must be power of 2\n", __func__); return; } relaxed_mask = GENMASK_ULL(BITS_PER_LONG_LONG - 1, ilog2(chan_cnt)); req.chan_mask &= relaxed_mask; } req.channel = chan; req.intf = pfvf->nix_rx_intf; req.entry = index; req.op = action.op; req.hdr.pcifunc = 0; /* AF is requester */ req.vf = pcifunc; req.index = action.index; req.match_id = action.match_id; req.flow_key_alg = action.flow_key_alg; rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf, bool enable) { struct npc_mcam *mcam = &rvu->hw->mcam; int blkaddr, index; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return; /* Get 'pcifunc' of PF device */ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_PROMISC_ENTRY); npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); } void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan) { struct rvu_pfvf *pfvf; struct npc_install_flow_req req = { 0 }; struct npc_install_flow_rsp rsp = { 0 }; struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_hwinfo *hw = rvu->hw; int blkaddr, index; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return; /* Skip LBK VFs */ if (is_afvf(pcifunc)) return; /* If pkt replication is not supported, * then only PF is allowed to add a bcast match entry. */ if (!hw->cap.nix_rx_multicast && is_vf(pcifunc)) return; /* Get 'pcifunc' of PF device */ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; pfvf = rvu_get_pfvf(rvu, pcifunc); /* Bcast rule should not be installed if both DMAC * and LXMB extraction is not supported by the profile. */ if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf) && !npc_is_feature_supported(rvu, BIT_ULL(NPC_LXMB), pfvf->nix_rx_intf)) return; index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_BCAST_ENTRY); if (!hw->cap.nix_rx_multicast) { /* Early silicon doesn't support pkt replication, * so install entry with UCAST action, so that PF * receives all broadcast packets. 
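 *
 * When replication is supported, the else-branch below instead uses
 * NIX_RX_ACTIONOP_MCAST with the PF's broadcast MCE list index
 * (pfvf->bcast_mce_idx), so the packet is replicated to every PF/VF
 * registered on that MCE list.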
*/ req.op = NIX_RX_ACTIONOP_UCAST; } else { req.op = NIX_RX_ACTIONOP_MCAST; req.index = pfvf->bcast_mce_idx; } eth_broadcast_addr((u8 *)&req.packet.dmac); eth_broadcast_addr((u8 *)&req.mask.dmac); req.features = BIT_ULL(NPC_DMAC); req.channel = chan; req.chan_mask = 0xFFFU; req.intf = pfvf->nix_rx_intf; req.entry = index; req.hdr.pcifunc = 0; /* AF is requester */ req.vf = pcifunc; rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf, bool enable) { struct npc_mcam *mcam = &rvu->hw->mcam; int blkaddr, index; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return; /* Get 'pcifunc' of PF device */ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_BCAST_ENTRY); npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); } void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan) { struct npc_install_flow_req req = { 0 }; struct npc_install_flow_rsp rsp = { 0 }; struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_hwinfo *hw = rvu->hw; int blkaddr, ucast_idx, index; u8 mac_addr[ETH_ALEN] = { 0 }; struct nix_rx_action action = { 0 }; struct rvu_pfvf *pfvf; u16 vf_func; /* Only CGX PF/VF can add allmulticast entry */ if (is_afvf(pcifunc) && is_sdp_vf(pcifunc)) return; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return; /* Get 'pcifunc' of PF device */ vf_func = pcifunc & RVU_PFVF_FUNC_MASK; pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; pfvf = rvu_get_pfvf(rvu, pcifunc); /* Mcast rule should not be installed if both DMAC * and LXMB extraction is not supported by the profile. */ if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf) && !npc_is_feature_supported(rvu, BIT_ULL(NPC_LXMB), pfvf->nix_rx_intf)) return; index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_ALLMULTI_ENTRY); /* If the corresponding PF's ucast action is RSS, * use the same action for multicast entry also */ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_UCAST_ENTRY); if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx)) *(u64 *)&action = npc_get_mcam_action(rvu, mcam, blkaddr, ucast_idx); if (action.op != NIX_RX_ACTIONOP_RSS) { *(u64 *)&action = 0; action.op = NIX_RX_ACTIONOP_UCAST; action.pf_func = pcifunc; } /* RX_ACTION set to MCAST for CGX PF's */ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list) { *(u64 *)&action = 0; action.op = NIX_RX_ACTIONOP_MCAST; action.index = pfvf->mcast_mce_idx; } mac_addr[0] = 0x01; /* LSB bit of 1st byte in DMAC */ ether_addr_copy(req.packet.dmac, mac_addr); ether_addr_copy(req.mask.dmac, mac_addr); req.features = BIT_ULL(NPC_DMAC); /* For cn10k the upper two bits of the channel number are * cpt channel number. with masking out these bits in the * mcam entry, same entry used for NIX will allow packets * received from cpt for parsing. 
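 *
 * In other words, NIX_CHAN_CPT_X2P_MASK clears the CPT bits in the
 * channel compare mask so they become don't-care: a packet looped
 * back from CPT differs from the original NIX RX channel only in
 * those upper bits and therefore still matches this entry.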
*/ if (!is_rvu_otx2(rvu)) req.chan_mask = NIX_CHAN_CPT_X2P_MASK; else req.chan_mask = 0xFFFU; req.channel = chan; req.intf = pfvf->nix_rx_intf; req.entry = index; req.op = action.op; req.hdr.pcifunc = 0; /* AF is requester */ req.vf = pcifunc | vf_func; req.index = action.index; req.match_id = action.match_id; req.flow_key_alg = action.flow_key_alg; rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, bool enable) { struct npc_mcam *mcam = &rvu->hw->mcam; int blkaddr, index; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return; /* Get 'pcifunc' of PF device */ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_ALLMULTI_ENTRY); npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); } static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 pcifunc, u64 rx_action) { int actindex, index, bank, entry; struct rvu_npc_mcam_rule *rule; bool enable, update; if (!(pcifunc & RVU_PFVF_FUNC_MASK)) return; mutex_lock(&mcam->lock); for (index = 0; index < mcam->bmap_entries; index++) { if (mcam->entry2target_pffunc[index] == pcifunc) { update = true; /* update not needed for the rules added via ntuple filters */ list_for_each_entry(rule, &mcam->mcam_rules, list) { if (rule->entry == index) update = false; } if (!update) continue; bank = npc_get_bank(mcam, index); actindex = index; entry = index & (mcam->banksize - 1); /* read vf flow entry enable status */ enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, actindex); /* disable before mcam entry update */ npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false); /* update 'action' */ rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_ACTION(entry, bank), rx_action); if (enable) npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true); } } mutex_unlock(&mcam->lock); } void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, int group, int alg_idx, int mcam_index) { struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_hwinfo *hw = rvu->hw; struct nix_rx_action action; int blkaddr, index, bank; struct rvu_pfvf *pfvf; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return; /* Check if this is for reserved default entry */ if (mcam_index < 0) { if (group != DEFAULT_RSS_CONTEXT_GROUP) return; index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_UCAST_ENTRY); } else { /* TODO: validate this mcam index */ index = mcam_index; } if (index >= mcam->total_entries) return; bank = npc_get_bank(mcam, index); index &= (mcam->banksize - 1); *(u64 *)&action = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_ACTION(index, bank)); /* Ignore if no action was set earlier */ if (!*(u64 *)&action) return; action.op = NIX_RX_ACTIONOP_RSS; action.pf_func = pcifunc; action.index = group; action.flow_key_alg = alg_idx; rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action); /* update the VF flow rule action with the VF default entry action */ if (mcam_index < 0) npc_update_vf_flow_entry(rvu, mcam, blkaddr, pcifunc, *(u64 *)&action); /* update the action change in default rule */ pfvf = rvu_get_pfvf(rvu, pcifunc); if (pfvf->def_ucast_rule) pfvf->def_ucast_rule->rx_action = action; index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_PROMISC_ENTRY); /* If PF's promiscuous entry is enabled, * Set RSS action for that entry as well */ if ((!hw->cap.nix_rx_multicast || !pfvf->use_mce_list) && is_mcam_entry_enabled(rvu, mcam, 
blkaddr, index)) { bank = npc_get_bank(mcam, index); index &= (mcam->banksize - 1); rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action); } } void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc, int nixlf, int type, bool enable) { struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_hwinfo *hw = rvu->hw; struct nix_mce_list *mce_list; int index, blkaddr, mce_idx; struct rvu_pfvf *pfvf; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return; index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK, nixlf, type); /* disable MCAM entry when packet replication is not supported by hw */ if (!hw->cap.nix_rx_multicast && !is_vf(pcifunc)) { npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); return; } /* return incase mce list is not enabled */ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); if (hw->cap.nix_rx_multicast && is_vf(pcifunc) && type != NIXLF_BCAST_ENTRY && !pfvf->use_mce_list) return; nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx); nix_update_mce_list(rvu, pcifunc, mce_list, mce_idx, index, enable); if (enable) npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); } static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf, bool enable) { struct npc_mcam *mcam = &rvu->hw->mcam; int index, blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return; /* Ucast MCAM match entry of this PF/VF */ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_UCAST_ENTRY); npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); /* Nothing to do for VFs, on platforms where pkt replication * is not supported */ if ((pcifunc & RVU_PFVF_FUNC_MASK) && !rvu->hw->cap.nix_rx_multicast) return; /* add/delete pf_func to broadcast MCE list */ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf, NIXLF_BCAST_ENTRY, enable); } void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf) { if (nixlf < 0) return; npc_enadis_default_entries(rvu, pcifunc, nixlf, false); /* Delete multicast and promisc MCAM entries */ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf, NIXLF_ALLMULTI_ENTRY, false); npc_enadis_default_mce_entry(rvu, pcifunc, nixlf, NIXLF_PROMISC_ENTRY, false); } bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bool enable) { int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_npc_mcam_rule *rule, *tmp; mutex_lock(&mcam->lock); list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) { if (rule->intf != intf) continue; if (rule->entry != entry) continue; rule->enable = enable; mutex_unlock(&mcam->lock); npc_enable_mcam_entry(rvu, mcam, blkaddr, entry, enable); return true; } mutex_unlock(&mcam->lock); return false; } void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf) { if (nixlf < 0) return; /* Enables only broadcast match entry. Promisc/Allmulti are enabled * in set_rx_mode mbox handler. 
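 *
 * Note the asymmetry with rvu_npc_disable_default_entries() above,
 * which does tear down the allmulti and promisc entries as well.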
*/ npc_enadis_default_entries(rvu, pcifunc, nixlf, true); } void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_npc_mcam_rule *rule, *tmp; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return; mutex_lock(&mcam->lock); /* Disable MCAM entries directing traffic to this 'pcifunc' */ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) { if (is_npc_intf_rx(rule->intf) && rule->rx_action.pf_func == pcifunc && rule->rx_action.op != NIX_RX_ACTIONOP_MCAST) { npc_enable_mcam_entry(rvu, mcam, blkaddr, rule->entry, false); rule->enable = false; /* Indicate that default rule is disabled */ if (rule->default_rule) { pfvf->def_ucast_rule = NULL; list_del(&rule->list); kfree(rule); } } } mutex_unlock(&mcam->lock); npc_mcam_disable_flows(rvu, pcifunc); rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); } void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf) { struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_npc_mcam_rule *rule, *tmp; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return; mutex_lock(&mcam->lock); /* Free all MCAM entries owned by this 'pcifunc' */ npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc); /* Free all MCAM counters owned by this 'pcifunc' */ npc_mcam_free_all_counters(rvu, mcam, pcifunc); /* Delete MCAM entries owned by this 'pcifunc' */ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) { if (rule->owner == pcifunc && !rule->default_rule) { list_del(&rule->list); kfree(rule); } } mutex_unlock(&mcam->lock); rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); } static void npc_program_mkex_rx(struct rvu *rvu, int blkaddr, struct npc_mcam_kex *mkex, u8 intf) { int lid, lt, ld, fl; if (is_npc_intf_tx(intf)) return; rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf), mkex->keyx_cfg[NIX_INTF_RX]); /* Program LDATA */ for (lid = 0; lid < NPC_MAX_LID; lid++) { for (lt = 0; lt < NPC_MAX_LT; lt++) { for (ld = 0; ld < NPC_MAX_LD; ld++) SET_KEX_LD(intf, lid, lt, ld, mkex->intf_lid_lt_ld[NIX_INTF_RX] [lid][lt][ld]); } } /* Program LFLAGS */ for (ld = 0; ld < NPC_MAX_LD; ld++) { for (fl = 0; fl < NPC_MAX_LFL; fl++) SET_KEX_LDFLAGS(intf, ld, fl, mkex->intf_ld_flags[NIX_INTF_RX] [ld][fl]); } } static void npc_program_mkex_tx(struct rvu *rvu, int blkaddr, struct npc_mcam_kex *mkex, u8 intf) { int lid, lt, ld, fl; if (is_npc_intf_rx(intf)) return; rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf), mkex->keyx_cfg[NIX_INTF_TX]); /* Program LDATA */ for (lid = 0; lid < NPC_MAX_LID; lid++) { for (lt = 0; lt < NPC_MAX_LT; lt++) { for (ld = 0; ld < NPC_MAX_LD; ld++) SET_KEX_LD(intf, lid, lt, ld, mkex->intf_lid_lt_ld[NIX_INTF_TX] [lid][lt][ld]); } } /* Program LFLAGS */ for (ld = 0; ld < NPC_MAX_LD; ld++) { for (fl = 0; fl < NPC_MAX_LFL; fl++) SET_KEX_LDFLAGS(intf, ld, fl, mkex->intf_ld_flags[NIX_INTF_TX] [ld][fl]); } } static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr, struct npc_mcam_kex *mkex) { struct rvu_hwinfo *hw = rvu->hw; u8 intf; int ld; for (ld = 0; ld < NPC_MAX_LD; ld++) rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld), mkex->kex_ld_flags[ld]); for (intf = 0; intf < hw->npc_intfs; intf++) { npc_program_mkex_rx(rvu, blkaddr, mkex, intf); npc_program_mkex_tx(rvu, blkaddr, mkex, intf); } /* Programme mkex hash profile */ npc_program_mkex_hash(rvu, blkaddr); } static int npc_fwdb_prfl_img_map(struct rvu *rvu, void __iomem 
**prfl_img_addr, u64 *size) { u64 prfl_addr, prfl_sz; if (!rvu->fwdata) return -EINVAL; prfl_addr = rvu->fwdata->mcam_addr; prfl_sz = rvu->fwdata->mcam_sz; if (!prfl_addr || !prfl_sz) return -EINVAL; *prfl_img_addr = ioremap_wc(prfl_addr, prfl_sz); if (!(*prfl_img_addr)) return -ENOMEM; *size = prfl_sz; return 0; } /* strtoull of "mkexprof" with base:36 */ #define MKEX_END_SIGN 0xdeadbeef static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr, const char *mkex_profile) { struct device *dev = &rvu->pdev->dev; struct npc_mcam_kex *mcam_kex; void __iomem *mkex_prfl_addr = NULL; u64 prfl_sz; int ret; /* If user not selected mkex profile */ if (rvu->kpu_fwdata_sz || !strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN)) goto program_mkex; /* Setting up the mapping for mkex profile image */ ret = npc_fwdb_prfl_img_map(rvu, &mkex_prfl_addr, &prfl_sz); if (ret < 0) goto program_mkex; mcam_kex = (struct npc_mcam_kex __force *)mkex_prfl_addr; while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) { /* Compare with mkex mod_param name string */ if (mcam_kex->mkex_sign == MKEX_SIGN && !strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) { /* Due to an errata (35786) in A0/B0 pass silicon, * parse nibble enable configuration has to be * identical for both Rx and Tx interfaces. */ if (!is_rvu_96xx_B0(rvu) || mcam_kex->keyx_cfg[NIX_INTF_RX] == mcam_kex->keyx_cfg[NIX_INTF_TX]) rvu->kpu.mkex = mcam_kex; goto program_mkex; } mcam_kex++; prfl_sz -= sizeof(struct npc_mcam_kex); } dev_warn(dev, "Failed to load requested profile: %s\n", mkex_profile); program_mkex: dev_info(rvu->dev, "Using %s mkex profile\n", rvu->kpu.mkex->name); /* Program selected mkex profile */ npc_program_mkex_profile(rvu, blkaddr, rvu->kpu.mkex); if (mkex_prfl_addr) iounmap(mkex_prfl_addr); } static void npc_config_kpuaction(struct rvu *rvu, int blkaddr, const struct npc_kpu_profile_action *kpuaction, int kpu, int entry, bool pkind) { struct npc_kpu_action0 action0 = {0}; struct npc_kpu_action1 action1 = {0}; u64 reg; action1.errlev = kpuaction->errlev; action1.errcode = kpuaction->errcode; action1.dp0_offset = kpuaction->dp0_offset; action1.dp1_offset = kpuaction->dp1_offset; action1.dp2_offset = kpuaction->dp2_offset; if (pkind) reg = NPC_AF_PKINDX_ACTION1(entry); else reg = NPC_AF_KPUX_ENTRYX_ACTION1(kpu, entry); rvu_write64(rvu, blkaddr, reg, *(u64 *)&action1); action0.byp_count = kpuaction->bypass_count; action0.capture_ena = kpuaction->cap_ena; action0.parse_done = kpuaction->parse_done; action0.next_state = kpuaction->next_state; action0.capture_lid = kpuaction->lid; action0.capture_ltype = kpuaction->ltype; action0.capture_flags = kpuaction->flags; action0.ptr_advance = kpuaction->ptr_advance; action0.var_len_offset = kpuaction->offset; action0.var_len_mask = kpuaction->mask; action0.var_len_right = kpuaction->right; action0.var_len_shift = kpuaction->shift; if (pkind) reg = NPC_AF_PKINDX_ACTION0(entry); else reg = NPC_AF_KPUX_ENTRYX_ACTION0(kpu, entry); rvu_write64(rvu, blkaddr, reg, *(u64 *)&action0); } static void npc_config_kpucam(struct rvu *rvu, int blkaddr, const struct npc_kpu_profile_cam *kpucam, int kpu, int entry) { struct npc_kpu_cam cam0 = {0}; struct npc_kpu_cam cam1 = {0}; cam1.state = kpucam->state & kpucam->state_mask; cam1.dp0_data = kpucam->dp0 & kpucam->dp0_mask; cam1.dp1_data = kpucam->dp1 & kpucam->dp1_mask; cam1.dp2_data = kpucam->dp2 & kpucam->dp2_mask; cam0.state = ~kpucam->state & kpucam->state_mask; cam0.dp0_data = ~kpucam->dp0 & kpucam->dp0_mask; cam0.dp1_data = ~kpucam->dp1 & 
kpucam->dp1_mask; cam0.dp2_data = ~kpucam->dp2 & kpucam->dp2_mask; rvu_write64(rvu, blkaddr, NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 0), *(u64 *)&cam0); rvu_write64(rvu, blkaddr, NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 1), *(u64 *)&cam1); } static inline u64 enable_mask(int count) { return (((count) < 64) ? ~(BIT_ULL(count) - 1) : (0x00ULL)); } static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu, const struct npc_kpu_profile *profile) { int entry, num_entries, max_entries; u64 entry_mask; if (profile->cam_entries != profile->action_entries) { dev_err(rvu->dev, "KPU%d: CAM and action entries [%d != %d] not equal\n", kpu, profile->cam_entries, profile->action_entries); } max_entries = rvu->hw->npc_kpu_entries; /* Program CAM match entries for previous KPU extracted data */ num_entries = min_t(int, profile->cam_entries, max_entries); for (entry = 0; entry < num_entries; entry++) npc_config_kpucam(rvu, blkaddr, &profile->cam[entry], kpu, entry); /* Program this KPU's actions */ num_entries = min_t(int, profile->action_entries, max_entries); for (entry = 0; entry < num_entries; entry++) npc_config_kpuaction(rvu, blkaddr, &profile->action[entry], kpu, entry, false); /* Enable all programmed entries */ num_entries = min_t(int, profile->action_entries, profile->cam_entries); entry_mask = enable_mask(num_entries); /* Disable first KPU_MAX_CST_ENT entries for built-in profile */ if (!rvu->kpu.custom) entry_mask |= GENMASK_ULL(KPU_MAX_CST_ENT - 1, 0); rvu_write64(rvu, blkaddr, NPC_AF_KPUX_ENTRY_DISX(kpu, 0), entry_mask); if (num_entries > 64) { rvu_write64(rvu, blkaddr, NPC_AF_KPUX_ENTRY_DISX(kpu, 1), enable_mask(num_entries - 64)); } /* Enable this KPU */ rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(kpu), 0x01); } static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile) { profile->custom = 0; profile->name = def_pfl_name; profile->version = NPC_KPU_PROFILE_VER; profile->ikpu = ikpu_action_entries; profile->pkinds = ARRAY_SIZE(ikpu_action_entries); profile->kpu = npc_kpu_profiles; profile->kpus = ARRAY_SIZE(npc_kpu_profiles); profile->lt_def = &npc_lt_defaults; profile->mkex = &npc_mkex_default; profile->mkex_hash = &npc_mkex_hash_default; return 0; } static int npc_apply_custom_kpu(struct rvu *rvu, struct npc_kpu_profile_adapter *profile) { size_t hdr_sz = sizeof(struct npc_kpu_profile_fwdata), offset = 0; struct npc_kpu_profile_fwdata *fw = rvu->kpu_fwdata; struct npc_kpu_profile_action *action; struct npc_kpu_profile_cam *cam; struct npc_kpu_fwdata *fw_kpu; int entries; u16 kpu, entry; if (rvu->kpu_fwdata_sz < hdr_sz) { dev_warn(rvu->dev, "Invalid KPU profile size\n"); return -EINVAL; } if (le64_to_cpu(fw->signature) != KPU_SIGN) { dev_warn(rvu->dev, "Invalid KPU profile signature %llx\n", fw->signature); return -EINVAL; } /* Verify if the using known profile structure */ if (NPC_KPU_VER_MAJ(profile->version) > NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER)) { dev_warn(rvu->dev, "Not supported Major version: %d > %d\n", NPC_KPU_VER_MAJ(profile->version), NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER)); return -EINVAL; } /* Verify if profile is aligned with the required kernel changes */ if (NPC_KPU_VER_MIN(profile->version) < NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER)) { dev_warn(rvu->dev, "Invalid KPU profile version: %d.%d.%d expected version <= %d.%d.%d\n", NPC_KPU_VER_MAJ(profile->version), NPC_KPU_VER_MIN(profile->version), NPC_KPU_VER_PATCH(profile->version), NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER), NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER), NPC_KPU_VER_PATCH(NPC_KPU_PROFILE_VER)); return 
-EINVAL; } /* Verify if profile fits the HW */ if (fw->kpus > profile->kpus) { dev_warn(rvu->dev, "Not enough KPUs: %d > %ld\n", fw->kpus, profile->kpus); return -EINVAL; } profile->custom = 1; profile->name = fw->name; profile->version = le64_to_cpu(fw->version); profile->mkex = &fw->mkex; profile->lt_def = &fw->lt_def; for (kpu = 0; kpu < fw->kpus; kpu++) { fw_kpu = (struct npc_kpu_fwdata *)(fw->data + offset); if (fw_kpu->entries > KPU_MAX_CST_ENT) dev_warn(rvu->dev, "Too many custom entries on KPU%d: %d > %d\n", kpu, fw_kpu->entries, KPU_MAX_CST_ENT); entries = min(fw_kpu->entries, KPU_MAX_CST_ENT); cam = (struct npc_kpu_profile_cam *)fw_kpu->data; offset += sizeof(*fw_kpu) + fw_kpu->entries * sizeof(*cam); action = (struct npc_kpu_profile_action *)(fw->data + offset); offset += fw_kpu->entries * sizeof(*action); if (rvu->kpu_fwdata_sz < hdr_sz + offset) { dev_warn(rvu->dev, "Profile size mismatch on KPU%i parsing.\n", kpu + 1); return -EINVAL; } for (entry = 0; entry < entries; entry++) { profile->kpu[kpu].cam[entry] = cam[entry]; profile->kpu[kpu].action[entry] = action[entry]; } } return 0; } static int npc_load_kpu_prfl_img(struct rvu *rvu, void __iomem *prfl_addr, u64 prfl_sz, const char *kpu_profile) { struct npc_kpu_profile_fwdata *kpu_data = NULL; int rc = -EINVAL; kpu_data = (struct npc_kpu_profile_fwdata __force *)prfl_addr; if (le64_to_cpu(kpu_data->signature) == KPU_SIGN && !strncmp(kpu_data->name, kpu_profile, KPU_NAME_LEN)) { dev_info(rvu->dev, "Loading KPU profile from firmware db: %s\n", kpu_profile); rvu->kpu_fwdata = kpu_data; rvu->kpu_fwdata_sz = prfl_sz; rvu->kpu_prfl_addr = prfl_addr; rc = 0; } return rc; } static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz, const char *kpu_profile) { struct npc_coalesced_kpu_prfl *img_data = NULL; int i = 0, rc = -EINVAL; void __iomem *kpu_prfl_addr; u16 offset; img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr; if (le64_to_cpu(img_data->signature) == KPU_SIGN && !strncmp(img_data->name, kpu_profile, KPU_NAME_LEN)) { /* Loaded profile is a single KPU profile. */ rc = npc_load_kpu_prfl_img(rvu, rvu->kpu_prfl_addr, prfl_sz, kpu_profile); goto done; } /* Loaded profile is coalesced image, offset of first KPU profile.*/ offset = offsetof(struct npc_coalesced_kpu_prfl, prfl_sz) + (img_data->num_prfl * sizeof(uint16_t)); /* Check if mapped image is coalesced image. */ while (i < img_data->num_prfl) { /* Profile image offsets are rounded up to next 8 multiple.*/ offset = ALIGN_8B_CEIL(offset); kpu_prfl_addr = (void __iomem *)((uintptr_t)rvu->kpu_prfl_addr + offset); rc = npc_load_kpu_prfl_img(rvu, kpu_prfl_addr, img_data->prfl_sz[i], kpu_profile); if (!rc) break; /* Calculating offset of profile image based on profile size.*/ offset += img_data->prfl_sz[i]; i++; } done: return rc; } static int npc_load_kpu_profile_fwdb(struct rvu *rvu, const char *kpu_profile) { int ret = -EINVAL; u64 prfl_sz; /* Setting up the mapping for NPC profile image */ ret = npc_fwdb_prfl_img_map(rvu, &rvu->kpu_prfl_addr, &prfl_sz); if (ret < 0) goto done; /* Detect if profile is coalesced or single KPU profile and load */ ret = npc_fwdb_detect_load_prfl_img(rvu, prfl_sz, kpu_profile); if (ret == 0) goto done; /* Cleaning up if KPU profile image from fwdata is not valid. 
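 *
 * Unmapping kpu_prfl_addr and clearing kpu_fwdata/kpu_fwdata_sz here
 * makes the caller fall back cleanly to the built-in default profile
 * instead of parsing a stale or partially mapped image.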
 */
	if (rvu->kpu_prfl_addr) {
		iounmap(rvu->kpu_prfl_addr);
		rvu->kpu_prfl_addr = NULL;
		rvu->kpu_fwdata_sz = 0;
		rvu->kpu_fwdata = NULL;
	}
done:
	return ret;
}

static void npc_load_kpu_profile(struct rvu *rvu)
{
	struct npc_kpu_profile_adapter *profile = &rvu->kpu;
	const char *kpu_profile = rvu->kpu_pfl_name;
	const struct firmware *fw = NULL;
	bool retry_fwdb = false;

	/* If the user has not specified any profile customization */
	if (!strncmp(kpu_profile, def_pfl_name, KPU_NAME_LEN))
		goto revert_to_default;

	/* First prepare default KPU, then we'll customize top entries. */
	npc_prepare_default_kpu(profile);

	/* Order of precedence for loading the NPC profile (high to low):
	 * Firmware binary in filesystem.
	 * Firmware database method.
	 * Default KPU profile.
	 */
	if (!request_firmware_direct(&fw, kpu_profile, rvu->dev)) {
		dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n",
			 kpu_profile);
		rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL);
		if (rvu->kpu_fwdata) {
			memcpy(rvu->kpu_fwdata, fw->data, fw->size);
			rvu->kpu_fwdata_sz = fw->size;
		}
		release_firmware(fw);
		retry_fwdb = true;
		goto program_kpu;
	}

load_image_fwdb:
	/* Load the KPU profile using the firmware database */
	if (npc_load_kpu_profile_fwdb(rvu, kpu_profile))
		goto revert_to_default;

program_kpu:
	/* Apply profile customization if firmware was loaded. */
	if (!rvu->kpu_fwdata_sz || npc_apply_custom_kpu(rvu, profile)) {
		/* If the image from the firmware filesystem failed to load
		 * or is invalid, retry with the firmware database method.
		 */
		if (rvu->kpu_fwdata || rvu->kpu_fwdata_sz) {
			/* Loading image from firmware database failed. */
			if (rvu->kpu_prfl_addr) {
				iounmap(rvu->kpu_prfl_addr);
				rvu->kpu_prfl_addr = NULL;
			} else {
				kfree(rvu->kpu_fwdata);
			}
			rvu->kpu_fwdata = NULL;
			rvu->kpu_fwdata_sz = 0;
			if (retry_fwdb) {
				retry_fwdb = false;
				goto load_image_fwdb;
			}
		}

		dev_warn(rvu->dev,
			 "Can't load KPU profile %s. Using default.\n",
			 kpu_profile);
		kfree(rvu->kpu_fwdata);
		rvu->kpu_fwdata = NULL;
		goto revert_to_default;
	}

	dev_info(rvu->dev, "Using custom profile '%s', version %d.%d.%d\n",
		 profile->name, NPC_KPU_VER_MAJ(profile->version),
		 NPC_KPU_VER_MIN(profile->version),
		 NPC_KPU_VER_PATCH(profile->version));

	return;

revert_to_default:
	npc_prepare_default_kpu(profile);
}

static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int num_pkinds, num_kpus, idx;

	/* Disable all KPUs and their entries */
	for (idx = 0; idx < hw->npc_kpus; idx++) {
		rvu_write64(rvu, blkaddr,
			    NPC_AF_KPUX_ENTRY_DISX(idx, 0), ~0ULL);
		rvu_write64(rvu, blkaddr,
			    NPC_AF_KPUX_ENTRY_DISX(idx, 1), ~0ULL);
		rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx), 0x00);
	}

	/* Load and customize KPU profile. */
	npc_load_kpu_profile(rvu);

	/* First program the IKPU profile i.e. PKIND configs.
	 * Check HW max count to avoid configuring junk or
	 * writing to unsupported CSR addresses.
*/ num_pkinds = rvu->kpu.pkinds; num_pkinds = min_t(int, hw->npc_pkinds, num_pkinds); for (idx = 0; idx < num_pkinds; idx++) npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true); /* Program KPU CAM and Action profiles */ num_kpus = rvu->kpu.kpus; num_kpus = min_t(int, hw->npc_kpus, num_kpus); for (idx = 0; idx < num_kpus; idx++) npc_program_kpu_profile(rvu, blkaddr, idx, &rvu->kpu.kpu[idx]); } static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) { int nixlf_count = rvu_get_nixlf_count(rvu); struct npc_mcam *mcam = &rvu->hw->mcam; int rsvd, err; u16 index; int cntr; u64 cfg; /* Actual number of MCAM entries vary by entry size */ cfg = (rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(0)) >> 32) & 0x07; mcam->total_entries = (mcam->banks / BIT_ULL(cfg)) * mcam->banksize; mcam->keysize = cfg; /* Number of banks combined per MCAM entry */ if (cfg == NPC_MCAM_KEY_X4) mcam->banks_per_entry = 4; else if (cfg == NPC_MCAM_KEY_X2) mcam->banks_per_entry = 2; else mcam->banks_per_entry = 1; /* Reserve one MCAM entry for each of the NIX LF to * guarantee space to install default matching DMAC rule. * Also reserve 2 MCAM entries for each PF for default * channel based matching or 'bcast & promisc' matching to * support BCAST and PROMISC modes of operation for PFs. * PF0 is excluded. */ rsvd = (nixlf_count * RSVD_MCAM_ENTRIES_PER_NIXLF) + ((rvu->hw->total_pfs - 1) * RSVD_MCAM_ENTRIES_PER_PF); if (mcam->total_entries <= rsvd) { dev_warn(rvu->dev, "Insufficient NPC MCAM size %d for pkt I/O, exiting\n", mcam->total_entries); return -ENOMEM; } mcam->bmap_entries = mcam->total_entries - rsvd; mcam->nixlf_offset = mcam->bmap_entries; mcam->pf_offset = mcam->nixlf_offset + nixlf_count; /* Allocate bitmaps for managing MCAM entries */ mcam->bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(mcam->bmap_entries), sizeof(long), GFP_KERNEL); if (!mcam->bmap) return -ENOMEM; mcam->bmap_reverse = devm_kcalloc(rvu->dev, BITS_TO_LONGS(mcam->bmap_entries), sizeof(long), GFP_KERNEL); if (!mcam->bmap_reverse) return -ENOMEM; mcam->bmap_fcnt = mcam->bmap_entries; /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ mcam->entry2pfvf_map = devm_kcalloc(rvu->dev, mcam->bmap_entries, sizeof(u16), GFP_KERNEL); if (!mcam->entry2pfvf_map) return -ENOMEM; /* Reserve 1/8th of MCAM entries at the bottom for low priority * allocations and another 1/8th at the top for high priority * allocations. */ mcam->lprio_count = mcam->bmap_entries / 8; if (mcam->lprio_count > BITS_PER_LONG) mcam->lprio_count = round_down(mcam->lprio_count, BITS_PER_LONG); mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count; mcam->hprio_count = mcam->lprio_count; mcam->hprio_end = mcam->hprio_count; /* Allocate bitmap for managing MCAM counters and memory * for saving counter to RVU PFFUNC allocation mapping. */ err = rvu_alloc_bitmap(&mcam->counters); if (err) return err; mcam->cntr2pfvf_map = devm_kcalloc(rvu->dev, mcam->counters.max, sizeof(u16), GFP_KERNEL); if (!mcam->cntr2pfvf_map) goto free_mem; /* Alloc memory for MCAM entry to counter mapping and for tracking * counter's reference count. 
*/ mcam->entry2cntr_map = devm_kcalloc(rvu->dev, mcam->bmap_entries, sizeof(u16), GFP_KERNEL); if (!mcam->entry2cntr_map) goto free_mem; mcam->cntr_refcnt = devm_kcalloc(rvu->dev, mcam->counters.max, sizeof(u16), GFP_KERNEL); if (!mcam->cntr_refcnt) goto free_mem; /* Alloc memory for saving target device of mcam rule */ mcam->entry2target_pffunc = devm_kcalloc(rvu->dev, mcam->total_entries, sizeof(u16), GFP_KERNEL); if (!mcam->entry2target_pffunc) goto free_mem; for (index = 0; index < mcam->bmap_entries; index++) { mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP; mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP; } for (cntr = 0; cntr < mcam->counters.max; cntr++) mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP; mutex_init(&mcam->lock); return 0; free_mem: kfree(mcam->counters.bmap); return -ENOMEM; } static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr) { struct npc_pkind *pkind = &rvu->hw->pkind; struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_hwinfo *hw = rvu->hw; u64 npc_const, npc_const1; u64 npc_const2 = 0; npc_const = rvu_read64(rvu, blkaddr, NPC_AF_CONST); npc_const1 = rvu_read64(rvu, blkaddr, NPC_AF_CONST1); if (npc_const1 & BIT_ULL(63)) npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2); pkind->rsrc.max = NPC_UNRESERVED_PKIND_COUNT; hw->npc_pkinds = (npc_const1 >> 12) & 0xFFULL; hw->npc_kpu_entries = npc_const1 & 0xFFFULL; hw->npc_kpus = (npc_const >> 8) & 0x1FULL; hw->npc_intfs = npc_const & 0xFULL; hw->npc_counters = (npc_const >> 48) & 0xFFFFULL; mcam->banks = (npc_const >> 44) & 0xFULL; mcam->banksize = (npc_const >> 28) & 0xFFFFULL; hw->npc_stat_ena = BIT_ULL(9); /* Extended set */ if (npc_const2) { hw->npc_ext_set = true; /* 96xx supports only match_stats and npc_counters * reflected in NPC_AF_CONST reg. * STAT_SEL and ENA are at [0:8] and 9 bit positions. * 98xx has both match_stat and ext and npc_counter * reflected in NPC_AF_CONST2 * STAT_SEL_EXT added at [12:14] bit position. * cn10k supports only ext and hence npc_counters in * NPC_AF_CONST is 0 and npc_counters reflected in NPC_AF_CONST2. * STAT_SEL bitpos incremented from [0:8] to [0:11] and ENA bit moved to 63 */ if (!hw->npc_counters) hw->npc_stat_ena = BIT_ULL(63); hw->npc_counters = (npc_const2 >> 16) & 0xFFFFULL; mcam->banksize = npc_const2 & 0xFFFFULL; } mcam->counters.max = hw->npc_counters; } static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr) { struct npc_mcam_kex *mkex = rvu->kpu.mkex; struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_hwinfo *hw = rvu->hw; u64 nibble_ena, rx_kex, tx_kex; u8 intf; /* Reserve last counter for MCAM RX miss action which is set to * drop packet. This way we will know how many pkts didn't match * any MCAM entry. */ mcam->counters.max--; mcam->rx_miss_act_cntr = mcam->counters.max; rx_kex = mkex->keyx_cfg[NIX_INTF_RX]; tx_kex = mkex->keyx_cfg[NIX_INTF_TX]; nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex); nibble_ena = rvu_npc_get_tx_nibble_cfg(rvu, nibble_ena); if (nibble_ena) { tx_kex &= ~NPC_PARSE_NIBBLE; tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena); mkex->keyx_cfg[NIX_INTF_TX] = tx_kex; } /* Configure RX interfaces */ for (intf = 0; intf < hw->npc_intfs; intf++) { if (is_npc_intf_tx(intf)) continue; /* Set RX MCAM search key size. LA..LE (ltype only) + Channel */ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf), rx_kex); /* If MCAM lookup doesn't result in a match, drop the received * packet. And map this action to a counter to count dropped * packets. 
*/ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(intf), NIX_RX_ACTIONOP_DROP); /* NPC_AF_INTFX_MISS_STAT_ACT[14:12] - counter[11:9] * NPC_AF_INTFX_MISS_STAT_ACT[8:0] - counter[8:0] */ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_STAT_ACT(intf), ((mcam->rx_miss_act_cntr >> 9) << 12) | hw->npc_stat_ena | mcam->rx_miss_act_cntr); } /* Configure TX interfaces */ for (intf = 0; intf < hw->npc_intfs; intf++) { if (is_npc_intf_rx(intf)) continue; /* Extract Ltypes LID_LA to LID_LE */ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf), tx_kex); /* Set TX miss action to UCAST_DEFAULT i.e * transmit the packet on NIX LF SQ's default channel. */ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(intf), NIX_TX_ACTIONOP_UCAST_DEFAULT); } } int rvu_npc_init(struct rvu *rvu) { struct npc_kpu_profile_adapter *kpu = &rvu->kpu; struct npc_pkind *pkind = &rvu->hw->pkind; struct npc_mcam *mcam = &rvu->hw->mcam; int blkaddr, entry, bank, err; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) { dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); return -ENODEV; } rvu_npc_hw_init(rvu, blkaddr); /* First disable all MCAM entries, to stop traffic towards NIXLFs */ for (bank = 0; bank < mcam->banks; bank++) { for (entry = 0; entry < mcam->banksize; entry++) rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0); } err = rvu_alloc_bitmap(&pkind->rsrc); if (err) return err; /* Reserve PKIND#0 for LBKs. Power reset value of LBK_CH_PKIND is '0', * no need to configure PKIND for all LBKs separately. */ rvu_alloc_rsrc(&pkind->rsrc); /* Allocate mem for pkind to PF and channel mapping info */ pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max, sizeof(u32), GFP_KERNEL); if (!pkind->pfchan_map) return -ENOMEM; /* Configure KPU profile */ npc_parser_profile_init(rvu, blkaddr); /* Config Outer L2, IPv4's NPC layer info */ rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OL2, (kpu->lt_def->pck_ol2.lid << 8) | (kpu->lt_def->pck_ol2.ltype_match << 4) | kpu->lt_def->pck_ol2.ltype_mask); rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4, (kpu->lt_def->pck_oip4.lid << 8) | (kpu->lt_def->pck_oip4.ltype_match << 4) | kpu->lt_def->pck_oip4.ltype_mask); /* Config Inner IPV4 NPC layer info */ rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4, (kpu->lt_def->pck_iip4.lid << 8) | (kpu->lt_def->pck_iip4.ltype_match << 4) | kpu->lt_def->pck_iip4.ltype_mask); /* Enable below for Rx pkts. * - Outer IPv4 header checksum validation. * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2B]. * - Detect outer L2 multicast address and set NPC_RESULT_S[L2M]. * - Inner IPv4 header checksum validation. 
* - Set non zero checksum error code value */ rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG, rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) | ((u64)NPC_EC_OIP4_CSUM << 32) | (NPC_EC_IIP4_CSUM << 24) | BIT_ULL(7) | BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1)); rvu_npc_setup_interfaces(rvu, blkaddr); npc_config_secret_key(rvu, blkaddr); /* Configure MKEX profile */ npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name); err = npc_mcam_rsrcs_init(rvu, blkaddr); if (err) return err; err = npc_flow_steering_init(rvu, blkaddr); if (err) { dev_err(rvu->dev, "Incorrect mkex profile loaded using default mkex\n"); npc_load_mkex_profile(rvu, blkaddr, def_pfl_name); } return 0; } void rvu_npc_freemem(struct rvu *rvu) { struct npc_pkind *pkind = &rvu->hw->pkind; struct npc_mcam *mcam = &rvu->hw->mcam; kfree(pkind->rsrc.bmap); kfree(mcam->counters.bmap); if (rvu->kpu_prfl_addr) iounmap(rvu->kpu_prfl_addr); else kfree(rvu->kpu_fwdata); mutex_destroy(&mcam->lock); } void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc, int blkaddr, int *alloc_cnt, int *enable_cnt) { struct npc_mcam *mcam = &rvu->hw->mcam; int entry; *alloc_cnt = 0; *enable_cnt = 0; for (entry = 0; entry < mcam->bmap_entries; entry++) { if (mcam->entry2pfvf_map[entry] == pcifunc) { (*alloc_cnt)++; if (is_mcam_entry_enabled(rvu, mcam, blkaddr, entry)) (*enable_cnt)++; } } } void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc, int blkaddr, int *alloc_cnt, int *enable_cnt) { struct npc_mcam *mcam = &rvu->hw->mcam; int cntr; *alloc_cnt = 0; *enable_cnt = 0; for (cntr = 0; cntr < mcam->counters.max; cntr++) { if (mcam->cntr2pfvf_map[cntr] == pcifunc) { (*alloc_cnt)++; if (mcam->cntr_refcnt[cntr]) (*enable_cnt)++; } } } static int npc_mcam_verify_entry(struct npc_mcam *mcam, u16 pcifunc, int entry) { /* verify AF installed entries */ if (is_pffunc_af(pcifunc)) return 0; /* Verify if entry is valid and if it is indeed * allocated to the requesting PFFUNC. */ if (entry >= mcam->bmap_entries) return NPC_MCAM_INVALID_REQ; if (pcifunc != mcam->entry2pfvf_map[entry]) return NPC_MCAM_PERM_DENIED; return 0; } static int npc_mcam_verify_counter(struct npc_mcam *mcam, u16 pcifunc, int cntr) { /* Verify if counter is valid and if it is indeed * allocated to the requesting PFFUNC. */ if (cntr >= mcam->counters.max) return NPC_MCAM_INVALID_REQ; if (pcifunc != mcam->cntr2pfvf_map[cntr]) return NPC_MCAM_PERM_DENIED; return 0; } static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 entry, u16 cntr) { u16 index = entry & (mcam->banksize - 1); u32 bank = npc_get_bank(mcam, entry); struct rvu_hwinfo *hw = rvu->hw; /* Set mapping and increment counter's refcnt */ mcam->entry2cntr_map[entry] = cntr; mcam->cntr_refcnt[cntr]++; /* Enable stats */ rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), ((cntr >> 9) << 12) | hw->npc_stat_ena | cntr); } static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 entry, u16 cntr) { u16 index = entry & (mcam->banksize - 1); u32 bank = npc_get_bank(mcam, entry); /* Remove mapping and reduce counter's refcnt */ mcam->entry2cntr_map[entry] = NPC_MCAM_INVALID_MAP; mcam->cntr_refcnt[cntr]--; /* Disable stats */ rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), 0x00); } /* Sets MCAM entry in bitmap as used. Update * reverse bitmap too. Should be called with * 'mcam->lock' held. 
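 *
 * The reverse bitmap mirrors the forward one: forward index 'n' maps
 * to reverse index 'bmap_entries - n - 1'. For example, with 1024
 * usable entries, setting entry 0 also sets reverse bit 1023, which
 * lets the allocator scan from the high end using the same
 * find_next_zero_bit() helper.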
*/ static void npc_mcam_set_bit(struct npc_mcam *mcam, u16 index) { u16 entry, rentry; entry = index; rentry = mcam->bmap_entries - index - 1; __set_bit(entry, mcam->bmap); __set_bit(rentry, mcam->bmap_reverse); mcam->bmap_fcnt--; } /* Sets MCAM entry in bitmap as free. Update * reverse bitmap too. Should be called with * 'mcam->lock' held. */ static void npc_mcam_clear_bit(struct npc_mcam *mcam, u16 index) { u16 entry, rentry; entry = index; rentry = mcam->bmap_entries - index - 1; __clear_bit(entry, mcam->bmap); __clear_bit(rentry, mcam->bmap_reverse); mcam->bmap_fcnt++; } static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 pcifunc) { u16 index, cntr; /* Scan all MCAM entries and free the ones mapped to 'pcifunc' */ for (index = 0; index < mcam->bmap_entries; index++) { if (mcam->entry2pfvf_map[index] == pcifunc) { mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP; /* Free the entry in bitmap */ npc_mcam_clear_bit(mcam, index); /* Disable the entry */ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false); /* Update entry2counter mapping */ cntr = mcam->entry2cntr_map[index]; if (cntr != NPC_MCAM_INVALID_MAP) npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, index, cntr); mcam->entry2target_pffunc[index] = 0x0; } } } static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam, u16 pcifunc) { u16 cntr; /* Scan all MCAM counters and free the ones mapped to 'pcifunc' */ for (cntr = 0; cntr < mcam->counters.max; cntr++) { if (mcam->cntr2pfvf_map[cntr] == pcifunc) { mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP; mcam->cntr_refcnt[cntr] = 0; rvu_free_rsrc(&mcam->counters, cntr); /* This API is expected to be called after freeing * MCAM entries, which inturn will remove * 'entry to counter' mapping. * No need to do it again. */ } } } /* Find area of contiguous free entries of size 'nr'. * If not found return max contiguous free entries available. */ static u16 npc_mcam_find_zero_area(unsigned long *map, u16 size, u16 start, u16 nr, u16 *max_area) { u16 max_area_start = 0; u16 index, next, end; *max_area = 0; again: index = find_next_zero_bit(map, size, start); if (index >= size) return max_area_start; end = ((index + nr) >= size) ? size : index + nr; next = find_next_bit(map, end, index); if (*max_area < (next - index)) { *max_area = next - index; max_area_start = index; } if (next < end) { start = next + 1; goto again; } return max_area_start; } /* Find number of free MCAM entries available * within range i.e in between 'start' and 'end'. */ static u16 npc_mcam_get_free_count(unsigned long *map, u16 start, u16 end) { u16 index, next; u16 fcnt = 0; again: if (start >= end) return fcnt; index = find_next_zero_bit(map, end, start); if (index >= end) return fcnt; next = find_next_bit(map, end, index); if (next <= end) { fcnt += next - index; start = next + 1; goto again; } fcnt += end - index; return fcnt; } static void npc_get_mcam_search_range_priority(struct npc_mcam *mcam, struct npc_mcam_alloc_entry_req *req, u16 *start, u16 *end, bool *reverse) { u16 fcnt; if (req->priority == NPC_MCAM_HIGHER_PRIO) goto hprio; /* For a low priority entry allocation * - If reference entry is not in hprio zone then * search range: ref_entry to end. * - If reference entry is in hprio zone and if * request can be accomodated in non-hprio zone then * search range: 'start of middle zone' to 'end' * - else search in reverse, so that less number of hprio * zone entries are allocated. 
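 *
 * Example with illustrative numbers: with 1024 usable entries the
 * hprio zone is roughly entries 0-127 and the lprio zone roughly
 * 896-1023. For a LOWER_PRIO request with ref_entry = 50, the code
 * below first checks whether the free count in [128, 1024) can hold
 * the request; if so the search starts at 128, otherwise the range
 * [51, 1024) is searched in reverse.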
*/ *reverse = false; *start = req->ref_entry + 1; *end = mcam->bmap_entries; if (req->ref_entry >= mcam->hprio_end) return; fcnt = npc_mcam_get_free_count(mcam->bmap, mcam->hprio_end, mcam->bmap_entries); if (fcnt > req->count) *start = mcam->hprio_end; else *reverse = true; return; hprio: /* For a high priority entry allocation, search is always * in reverse to preserve hprio zone entries. * - If reference entry is not in lprio zone then * search range: 0 to ref_entry. * - If reference entry is in lprio zone and if * request can be accomodated in middle zone then * search range: 'hprio_end' to 'lprio_start' */ *reverse = true; *start = 0; *end = req->ref_entry; if (req->ref_entry <= mcam->lprio_start) return; fcnt = npc_mcam_get_free_count(mcam->bmap, mcam->hprio_end, mcam->lprio_start); if (fcnt < req->count) return; *start = mcam->hprio_end; *end = mcam->lprio_start; } static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc, struct npc_mcam_alloc_entry_req *req, struct npc_mcam_alloc_entry_rsp *rsp) { u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES]; u16 fcnt, hp_fcnt, lp_fcnt; u16 start, end, index; int entry, next_start; bool reverse = false; unsigned long *bmap; u16 max_contig; mutex_lock(&mcam->lock); /* Check if there are any free entries */ if (!mcam->bmap_fcnt) { mutex_unlock(&mcam->lock); return NPC_MCAM_ALLOC_FAILED; } /* MCAM entries are divided into high priority, middle and * low priority zones. Idea is to not allocate top and lower * most entries as much as possible, this is to increase * probability of honouring priority allocation requests. * * Two bitmaps are used for mcam entry management, * mcam->bmap for forward search i.e '0 to mcam->bmap_entries'. * mcam->bmap_reverse for reverse search i.e 'mcam->bmap_entries to 0'. * * Reverse bitmap is used to allocate entries * - when a higher priority entry is requested * - when available free entries are less. * Lower priority ones out of avaialble free entries are always * chosen when 'high vs low' question arises. */ /* Get the search range for priority allocation request */ if (req->priority) { npc_get_mcam_search_range_priority(mcam, req, &start, &end, &reverse); goto alloc; } /* For a VF base MCAM match rule is set by its PF. And all the * further MCAM rules installed by VF on its own are * concatenated with the base rule set by its PF. Hence PF entries * should be at lower priority compared to VF entries. Otherwise * base rule is hit always and rules installed by VF will be of * no use. Hence if the request is from PF and NOT a priority * allocation request then allocate low priority entries. */ if (!(pcifunc & RVU_PFVF_FUNC_MASK)) goto lprio_alloc; /* Find out the search range for non-priority allocation request * * Get MCAM free entry count in middle zone. */ lp_fcnt = npc_mcam_get_free_count(mcam->bmap, mcam->lprio_start, mcam->bmap_entries); hp_fcnt = npc_mcam_get_free_count(mcam->bmap, 0, mcam->hprio_end); fcnt = mcam->bmap_fcnt - lp_fcnt - hp_fcnt; /* Check if request can be accomodated in the middle zone */ if (fcnt > req->count) { start = mcam->hprio_end; end = mcam->lprio_start; } else if ((fcnt + (hp_fcnt / 2) + (lp_fcnt / 2)) > req->count) { /* Expand search zone from half of hprio zone to * half of lprio zone. */ start = mcam->hprio_end / 2; end = mcam->bmap_entries - (mcam->lprio_count / 2); reverse = true; } else { /* Not enough free entries, search all entries in reverse, * so that low priority ones will get used up. 
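 *
 * For a reverse search the [start, end) window is translated into
 * reverse-bitmap coordinates at the 'alloc' label below: both bounds
 * are mirrored with 'bmap_entries - x' and then swapped, and a zero
 * bit found at reverse index 'i' is converted back to the real entry
 * index as 'bmap_entries - i - 1'.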
*/ lprio_alloc: reverse = true; start = 0; end = mcam->bmap_entries; } alloc: if (reverse) { bmap = mcam->bmap_reverse; start = mcam->bmap_entries - start; end = mcam->bmap_entries - end; swap(start, end); } else { bmap = mcam->bmap; } if (req->contig) { /* Allocate requested number of contiguous entries, if * unsuccessful find max contiguous entries available. */ index = npc_mcam_find_zero_area(bmap, end, start, req->count, &max_contig); rsp->count = max_contig; if (reverse) rsp->entry = mcam->bmap_entries - index - max_contig; else rsp->entry = index; } else { /* Allocate requested number of non-contiguous entries, * if unsuccessful allocate as many as possible. */ rsp->count = 0; next_start = start; for (entry = 0; entry < req->count; entry++) { index = find_next_zero_bit(bmap, end, next_start); if (index >= end) break; next_start = start + (index - start) + 1; /* Save the entry's index */ if (reverse) index = mcam->bmap_entries - index - 1; entry_list[entry] = index; rsp->count++; } } /* If allocating requested no of entries is unsucessful, * expand the search range to full bitmap length and retry. */ if (!req->priority && (rsp->count < req->count) && ((end - start) != mcam->bmap_entries)) { reverse = true; start = 0; end = mcam->bmap_entries; goto alloc; } /* For priority entry allocation requests, if allocation is * failed then expand search to max possible range and retry. */ if (req->priority && rsp->count < req->count) { if (req->priority == NPC_MCAM_LOWER_PRIO && (start != (req->ref_entry + 1))) { start = req->ref_entry + 1; end = mcam->bmap_entries; reverse = false; goto alloc; } else if ((req->priority == NPC_MCAM_HIGHER_PRIO) && ((end - start) != req->ref_entry)) { start = 0; end = req->ref_entry; reverse = true; goto alloc; } } /* Copy MCAM entry indices into mbox response entry_list. * Requester always expects indices in ascending order, so * reverse the list if reverse bitmap is used for allocation. */ if (!req->contig && rsp->count) { index = 0; for (entry = rsp->count - 1; entry >= 0; entry--) { if (reverse) rsp->entry_list[index++] = entry_list[entry]; else rsp->entry_list[entry] = entry_list[entry]; } } /* Mark the allocated entries as used and set nixlf mapping */ for (entry = 0; entry < rsp->count; entry++) { index = req->contig ? (rsp->entry + entry) : rsp->entry_list[entry]; npc_mcam_set_bit(mcam, index); mcam->entry2pfvf_map[index] = pcifunc; mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP; } /* Update available free count in mbox response */ rsp->free_count = mcam->bmap_fcnt; mutex_unlock(&mcam->lock); return 0; } /* Marks bitmaps to reserved the mcam slot */ void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx) { struct npc_mcam *mcam = &rvu->hw->mcam; npc_mcam_set_bit(mcam, entry_idx); } int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu, struct npc_mcam_alloc_entry_req *req, struct npc_mcam_alloc_entry_rsp *rsp) { struct npc_mcam *mcam = &rvu->hw->mcam; u16 pcifunc = req->hdr.pcifunc; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; rsp->entry = NPC_MCAM_ENTRY_INVALID; rsp->free_count = 0; /* Check if ref_entry is within range */ if (req->priority && req->ref_entry >= mcam->bmap_entries) { dev_err(rvu->dev, "%s: reference entry %d is out of range\n", __func__, req->ref_entry); return NPC_MCAM_INVALID_REQ; } /* ref_entry can't be '0' if requested priority is high. * Can't be last entry if requested priority is low. 
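 *
 * Lower MCAM indices are matched first and hence have higher
 * priority, so nothing can sit above index 0 or below the last
 * index; both requests are therefore rejected as invalid.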
*/ if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) || ((req->ref_entry == (mcam->bmap_entries - 1)) && req->priority == NPC_MCAM_LOWER_PRIO)) return NPC_MCAM_INVALID_REQ; /* Since list of allocated indices needs to be sent to requester, * max number of non-contiguous entries per mbox msg is limited. */ if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES) { dev_err(rvu->dev, "%s: %d Non-contiguous MCAM entries requested is more than max (%d) allowed\n", __func__, req->count, NPC_MAX_NONCONTIG_ENTRIES); return NPC_MCAM_INVALID_REQ; } /* Alloc request from PFFUNC with no NIXLF attached should be denied */ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) return NPC_MCAM_ALLOC_DENIED; return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp); } int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu, struct npc_mcam_free_entry_req *req, struct msg_rsp *rsp) { struct npc_mcam *mcam = &rvu->hw->mcam; u16 pcifunc = req->hdr.pcifunc; int blkaddr, rc = 0; u16 cntr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; /* Free request from PFFUNC with no NIXLF attached, ignore */ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) return NPC_MCAM_INVALID_REQ; mutex_lock(&mcam->lock); if (req->all) goto free_all; rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); if (rc) goto exit; mcam->entry2pfvf_map[req->entry] = NPC_MCAM_INVALID_MAP; mcam->entry2target_pffunc[req->entry] = 0x0; npc_mcam_clear_bit(mcam, req->entry); npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false); /* Update entry2counter mapping */ cntr = mcam->entry2cntr_map[req->entry]; if (cntr != NPC_MCAM_INVALID_MAP) npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, req->entry, cntr); goto exit; free_all: /* Free up all entries allocated to requesting PFFUNC */ npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc); exit: mutex_unlock(&mcam->lock); return rc; } int rvu_mbox_handler_npc_mcam_read_entry(struct rvu *rvu, struct npc_mcam_read_entry_req *req, struct npc_mcam_read_entry_rsp *rsp) { struct npc_mcam *mcam = &rvu->hw->mcam; u16 pcifunc = req->hdr.pcifunc; int blkaddr, rc; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; mutex_lock(&mcam->lock); rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); if (!rc) { npc_read_mcam_entry(rvu, mcam, blkaddr, req->entry, &rsp->entry_data, &rsp->intf, &rsp->enable); } mutex_unlock(&mcam->lock); return rc; } int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, struct npc_mcam_write_entry_req *req, struct msg_rsp *rsp) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); struct npc_mcam *mcam = &rvu->hw->mcam; u16 pcifunc = req->hdr.pcifunc; int blkaddr, rc; u8 nix_intf; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; mutex_lock(&mcam->lock); rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); if (rc) goto exit; if (req->set_cntr && npc_mcam_verify_counter(mcam, pcifunc, req->cntr)) { rc = NPC_MCAM_INVALID_REQ; goto exit; } if (!is_npc_interface_valid(rvu, req->intf)) { rc = NPC_MCAM_INVALID_REQ; goto exit; } if (is_npc_intf_tx(req->intf)) nix_intf = pfvf->nix_tx_intf; else nix_intf = pfvf->nix_rx_intf; if (!is_pffunc_af(pcifunc) && npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) { rc = NPC_MCAM_INVALID_REQ; goto exit; } /* For AF installed rules, the nix_intf should be set to target NIX */ if (is_pffunc_af(req->hdr.pcifunc)) nix_intf = req->intf; 
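	/* Program the CAM key, interface and action for this entry and,
	 * if set_cntr was requested, map the given stats counter to it.
	 */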
npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, nix_intf, &req->entry_data, req->enable_entry); if (req->set_cntr) npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, req->entry, req->cntr); rc = 0; exit: mutex_unlock(&mcam->lock); return rc; } int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu, struct npc_mcam_ena_dis_entry_req *req, struct msg_rsp *rsp) { struct npc_mcam *mcam = &rvu->hw->mcam; u16 pcifunc = req->hdr.pcifunc; int blkaddr, rc; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; mutex_lock(&mcam->lock); rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); mutex_unlock(&mcam->lock); if (rc) return rc; npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, true); return 0; } int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu, struct npc_mcam_ena_dis_entry_req *req, struct msg_rsp *rsp) { struct npc_mcam *mcam = &rvu->hw->mcam; u16 pcifunc = req->hdr.pcifunc; int blkaddr, rc; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; mutex_lock(&mcam->lock); rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); mutex_unlock(&mcam->lock); if (rc) return rc; npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false); return 0; } int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu, struct npc_mcam_shift_entry_req *req, struct npc_mcam_shift_entry_rsp *rsp) { struct npc_mcam *mcam = &rvu->hw->mcam; u16 pcifunc = req->hdr.pcifunc; u16 old_entry, new_entry; int blkaddr, rc = 0; u16 index, cntr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; if (req->shift_count > NPC_MCAM_MAX_SHIFTS) return NPC_MCAM_INVALID_REQ; mutex_lock(&mcam->lock); for (index = 0; index < req->shift_count; index++) { old_entry = req->curr_entry[index]; new_entry = req->new_entry[index]; /* Check if both old and new entries are valid and * does belong to this PFFUNC or not. 
*/ rc = npc_mcam_verify_entry(mcam, pcifunc, old_entry); if (rc) break; rc = npc_mcam_verify_entry(mcam, pcifunc, new_entry); if (rc) break; /* new_entry should not have a counter mapped */ if (mcam->entry2cntr_map[new_entry] != NPC_MCAM_INVALID_MAP) { rc = NPC_MCAM_PERM_DENIED; break; } /* Disable the new_entry */ npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, false); /* Copy rule from old entry to new entry */ npc_copy_mcam_entry(rvu, mcam, blkaddr, old_entry, new_entry); /* Copy counter mapping, if any */ cntr = mcam->entry2cntr_map[old_entry]; if (cntr != NPC_MCAM_INVALID_MAP) { npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, old_entry, cntr); npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, new_entry, cntr); } /* Enable new_entry and disable old_entry */ npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, true); npc_enable_mcam_entry(rvu, mcam, blkaddr, old_entry, false); } /* If shift has failed then report the failed index */ if (index != req->shift_count) { rc = NPC_MCAM_PERM_DENIED; rsp->failed_entry_idx = index; } mutex_unlock(&mcam->lock); return rc; } int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu, struct npc_mcam_alloc_counter_req *req, struct npc_mcam_alloc_counter_rsp *rsp) { struct npc_mcam *mcam = &rvu->hw->mcam; u16 pcifunc = req->hdr.pcifunc; u16 max_contig, cntr; int blkaddr, index; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; /* If the request is from a PFFUNC with no NIXLF attached, ignore */ if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) return NPC_MCAM_INVALID_REQ; /* Since list of allocated counter IDs needs to be sent to requester, * max number of non-contiguous counters per mbox msg is limited. */ if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS) return NPC_MCAM_INVALID_REQ; mutex_lock(&mcam->lock); /* Check if unused counters are available or not */ if (!rvu_rsrc_free_count(&mcam->counters)) { mutex_unlock(&mcam->lock); return NPC_MCAM_ALLOC_FAILED; } rsp->count = 0; if (req->contig) { /* Allocate requested number of contiguous counters, if * unsuccessful find max contiguous entries available. */ index = npc_mcam_find_zero_area(mcam->counters.bmap, mcam->counters.max, 0, req->count, &max_contig); rsp->count = max_contig; rsp->cntr = index; for (cntr = index; cntr < (index + max_contig); cntr++) { __set_bit(cntr, mcam->counters.bmap); mcam->cntr2pfvf_map[cntr] = pcifunc; } } else { /* Allocate requested number of non-contiguous counters, * if unsuccessful allocate as many as possible. 
*/ for (cntr = 0; cntr < req->count; cntr++) { index = rvu_alloc_rsrc(&mcam->counters); if (index < 0) break; rsp->cntr_list[cntr] = index; rsp->count++; mcam->cntr2pfvf_map[index] = pcifunc; } } mutex_unlock(&mcam->lock); return 0; } int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu, struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp) { struct npc_mcam *mcam = &rvu->hw->mcam; u16 index, entry = 0; int blkaddr, err; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; mutex_lock(&mcam->lock); err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); if (err) { mutex_unlock(&mcam->lock); return err; } /* Mark counter as free/unused */ mcam->cntr2pfvf_map[req->cntr] = NPC_MCAM_INVALID_MAP; rvu_free_rsrc(&mcam->counters, req->cntr); /* Disable all MCAM entry's stats which are using this counter */ while (entry < mcam->bmap_entries) { if (!mcam->cntr_refcnt[req->cntr]) break; index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry); if (index >= mcam->bmap_entries) break; entry = index + 1; if (mcam->entry2cntr_map[index] != req->cntr) continue; npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, index, req->cntr); } mutex_unlock(&mcam->lock); return 0; } int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu, struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp) { struct npc_mcam *mcam = &rvu->hw->mcam; u16 index, entry = 0; int blkaddr, rc; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; mutex_lock(&mcam->lock); rc = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); if (rc) goto exit; /* Unmap the MCAM entry and counter */ if (!req->all) { rc = npc_mcam_verify_entry(mcam, req->hdr.pcifunc, req->entry); if (rc) goto exit; npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, req->entry, req->cntr); goto exit; } /* Disable all MCAM entry's stats which are using this counter */ while (entry < mcam->bmap_entries) { if (!mcam->cntr_refcnt[req->cntr]) break; index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry); if (index >= mcam->bmap_entries) break; entry = index + 1; if (mcam->entry2cntr_map[index] != req->cntr) continue; npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, index, req->cntr); } exit: mutex_unlock(&mcam->lock); return rc; } int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu, struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp) { struct npc_mcam *mcam = &rvu->hw->mcam; int blkaddr, err; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; mutex_lock(&mcam->lock); err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); mutex_unlock(&mcam->lock); if (err) return err; rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr), 0x00); return 0; } int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu, struct npc_mcam_oper_counter_req *req, struct npc_mcam_oper_counter_rsp *rsp) { struct npc_mcam *mcam = &rvu->hw->mcam; int blkaddr, err; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; mutex_lock(&mcam->lock); err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); mutex_unlock(&mcam->lock); if (err) return err; rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr)); rsp->stat &= BIT_ULL(48) - 1; return 0; } int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, struct npc_mcam_alloc_and_write_entry_req *req, struct npc_mcam_alloc_and_write_entry_rsp *rsp) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, 
req->hdr.pcifunc); struct npc_mcam_alloc_counter_req cntr_req; struct npc_mcam_alloc_counter_rsp cntr_rsp; struct npc_mcam_alloc_entry_req entry_req; struct npc_mcam_alloc_entry_rsp entry_rsp; struct npc_mcam *mcam = &rvu->hw->mcam; u16 entry = NPC_MCAM_ENTRY_INVALID; u16 cntr = NPC_MCAM_ENTRY_INVALID; int blkaddr, rc; u8 nix_intf; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; if (!is_npc_interface_valid(rvu, req->intf)) return NPC_MCAM_INVALID_REQ; if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, req->hdr.pcifunc)) return NPC_MCAM_INVALID_REQ; /* Try to allocate a MCAM entry */ entry_req.hdr.pcifunc = req->hdr.pcifunc; entry_req.contig = true; entry_req.priority = req->priority; entry_req.ref_entry = req->ref_entry; entry_req.count = 1; rc = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &entry_req, &entry_rsp); if (rc) return rc; if (!entry_rsp.count) return NPC_MCAM_ALLOC_FAILED; entry = entry_rsp.entry; if (!req->alloc_cntr) goto write_entry; /* Now allocate counter */ cntr_req.hdr.pcifunc = req->hdr.pcifunc; cntr_req.contig = true; cntr_req.count = 1; rc = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp); if (rc) { /* Free allocated MCAM entry */ mutex_lock(&mcam->lock); mcam->entry2pfvf_map[entry] = NPC_MCAM_INVALID_MAP; npc_mcam_clear_bit(mcam, entry); mutex_unlock(&mcam->lock); return rc; } cntr = cntr_rsp.cntr; write_entry: mutex_lock(&mcam->lock); if (is_npc_intf_tx(req->intf)) nix_intf = pfvf->nix_tx_intf; else nix_intf = pfvf->nix_rx_intf; npc_config_mcam_entry(rvu, mcam, blkaddr, entry, nix_intf, &req->entry_data, req->enable_entry); if (req->alloc_cntr) npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, entry, cntr); mutex_unlock(&mcam->lock); rsp->entry = entry; rsp->cntr = cntr; return 0; } #define GET_KEX_CFG(intf) \ rvu_read64(rvu, BLKADDR_NPC, NPC_AF_INTFX_KEX_CFG(intf)) #define GET_KEX_FLAGS(ld) \ rvu_read64(rvu, BLKADDR_NPC, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld)) #define GET_KEX_LD(intf, lid, lt, ld) \ rvu_read64(rvu, BLKADDR_NPC, \ NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld)) #define GET_KEX_LDFLAGS(intf, ld, fl) \ rvu_read64(rvu, BLKADDR_NPC, \ NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, fl)) int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req, struct npc_get_kex_cfg_rsp *rsp) { int lid, lt, ld, fl; rsp->rx_keyx_cfg = GET_KEX_CFG(NIX_INTF_RX); rsp->tx_keyx_cfg = GET_KEX_CFG(NIX_INTF_TX); for (lid = 0; lid < NPC_MAX_LID; lid++) { for (lt = 0; lt < NPC_MAX_LT; lt++) { for (ld = 0; ld < NPC_MAX_LD; ld++) { rsp->intf_lid_lt_ld[NIX_INTF_RX][lid][lt][ld] = GET_KEX_LD(NIX_INTF_RX, lid, lt, ld); rsp->intf_lid_lt_ld[NIX_INTF_TX][lid][lt][ld] = GET_KEX_LD(NIX_INTF_TX, lid, lt, ld); } } } for (ld = 0; ld < NPC_MAX_LD; ld++) rsp->kex_ld_flags[ld] = GET_KEX_FLAGS(ld); for (ld = 0; ld < NPC_MAX_LD; ld++) { for (fl = 0; fl < NPC_MAX_LFL; fl++) { rsp->intf_ld_flags[NIX_INTF_RX][ld][fl] = GET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl); rsp->intf_ld_flags[NIX_INTF_TX][ld][fl] = GET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl); } } memcpy(rsp->mkex_pfl_name, rvu->mkex_pfl_name, MKEX_NAME_LEN); return 0; } static int npc_set_var_len_offset_pkind(struct rvu *rvu, u16 pcifunc, u64 pkind, u8 var_len_off, u8 var_len_off_mask, u8 shift_dir) { struct npc_kpu_action0 *act0; u8 shift_count = 0; int blkaddr; u64 val; if (!var_len_off_mask) return -EINVAL; if (var_len_off_mask != 0xff) { if (shift_dir) shift_count = __ffs(var_len_off_mask); else shift_count = (8 - __fls(var_len_off_mask)); } blkaddr = rvu_get_blkaddr(rvu, 
BLKTYPE_NPC, pcifunc); if (blkaddr < 0) { dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); return -EINVAL; } val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind)); act0 = (struct npc_kpu_action0 *)&val; act0->var_len_shift = shift_count; act0->var_len_right = shift_dir; act0->var_len_mask = var_len_off_mask; act0->var_len_offset = var_len_off; rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val); return 0; } int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir, u64 pkind, u8 var_len_off, u8 var_len_off_mask, u8 shift_dir) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); int blkaddr, nixlf, rc, intf_mode; int pf = rvu_get_pf(pcifunc); u64 rxpkind, txpkind; u8 cgx_id, lmac_id; /* use default pkind to disable edsa/higig */ rxpkind = rvu_npc_get_pkind(rvu, pf); txpkind = NPC_TX_DEF_PKIND; intf_mode = NPC_INTF_MODE_DEF; if (mode & OTX2_PRIV_FLAGS_CUSTOM) { if (pkind == NPC_RX_CUSTOM_PRE_L2_PKIND) { rc = npc_set_var_len_offset_pkind(rvu, pcifunc, pkind, var_len_off, var_len_off_mask, shift_dir); if (rc) return rc; } rxpkind = pkind; txpkind = pkind; } if (dir & PKIND_RX) { /* rx pkind set req valid only for cgx mapped PFs */ if (!is_cgx_config_permitted(rvu, pcifunc)) return 0; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); rc = cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, rxpkind); if (rc) return rc; } if (dir & PKIND_TX) { /* Tx pkind set request valid if PCIFUNC has NIXLF attached */ rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); if (rc) return rc; rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), txpkind); } pfvf->intf_mode = intf_mode; return 0; } int rvu_mbox_handler_npc_set_pkind(struct rvu *rvu, struct npc_set_pkind *req, struct msg_rsp *rsp) { return rvu_npc_set_parse_mode(rvu, req->hdr.pcifunc, req->mode, req->dir, req->pkind, req->var_len_off, req->var_len_off_mask, req->shift_dir); } int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu, struct msg_req *req, struct npc_mcam_read_base_rule_rsp *rsp) { struct npc_mcam *mcam = &rvu->hw->mcam; int index, blkaddr, nixlf, rc = 0; u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; u8 intf, enable; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; /* Return the channel number in case of PF */ if (!(pcifunc & RVU_PFVF_FUNC_MASK)) { pfvf = rvu_get_pfvf(rvu, pcifunc); rsp->entry.kw[0] = pfvf->rx_chan_base; rsp->entry.kw_mask[0] = 0xFFFULL; goto out; } /* Find the pkt steering rule installed by PF to this VF */ mutex_lock(&mcam->lock); for (index = 0; index < mcam->bmap_entries; index++) { if (mcam->entry2target_pffunc[index] == pcifunc) goto read_entry; } rc = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); if (rc < 0) { mutex_unlock(&mcam->lock); goto out; } /* Read the default ucast entry if there is no pkt steering rule */ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_UCAST_ENTRY); read_entry: /* Read the mcam entry */ npc_read_mcam_entry(rvu, mcam, blkaddr, index, &rsp->entry, &intf, &enable); mutex_unlock(&mcam->lock); out: return rc; } int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu, struct npc_mcam_get_stats_req *req, struct npc_mcam_get_stats_rsp *rsp) { struct npc_mcam *mcam = &rvu->hw->mcam; u16 index, cntr; int blkaddr; u64 regval; u32 bank; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; mutex_lock(&mcam->lock); index = req->entry & (mcam->banksize - 1); bank = npc_get_bank(mcam, req->entry); /* read MCAM entry STAT_ACT 
register */ regval = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank)); if (!(regval & rvu->hw->npc_stat_ena)) { rsp->stat_ena = 0; mutex_unlock(&mcam->lock); return 0; } cntr = regval & 0x1FF; rsp->stat_ena = 1; rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(cntr)); rsp->stat &= BIT_ULL(48) - 1; mutex_unlock(&mcam->lock); return 0; }
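/* Hypothetical sketch, not part of the upstream driver: a minimal caller of
 * the MCAM allocation handler above that reserves a single contiguous entry
 * for a given PF/VF. It mirrors the pattern used internally by
 * rvu_mbox_handler_npc_mcam_alloc_and_write_entry(); the helper name and the
 * zero-initialised request/response structs are assumptions for illustration.
 */
static int __maybe_unused npc_mcam_alloc_one_entry_sketch(struct rvu *rvu,
							   u16 pcifunc,
							   u16 *entry)
{
	struct npc_mcam_alloc_entry_req req = { 0 };
	struct npc_mcam_alloc_entry_rsp rsp = { 0 };
	int rc;

	req.hdr.pcifunc = pcifunc;
	req.contig = true;	/* one contiguous entry, no priority hint */
	req.count = 1;

	rc = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &req, &rsp);
	if (rc)
		return rc;
	if (!rsp.count)
		return NPC_MCAM_ALLOC_FAILED;

	*entry = rsp.entry;
	return 0;
}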
linux-master
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
// SPDX-License-Identifier: GPL-2.0-only /* Marvell RVU Admin Function driver * * Copyright (C) 2020 Marvell. * */ #include <linux/bitfield.h> #include <linux/pci.h> #include "rvu_struct.h" #include "rvu_reg.h" #include "mbox.h" #include "rvu.h" /* CPT PF device id */ #define PCI_DEVID_OTX2_CPT_PF 0xA0FD #define PCI_DEVID_OTX2_CPT10K_PF 0xA0F2 /* Length of initial context fetch in 128 byte words */ #define CPT_CTX_ILEN 1ULL #define cpt_get_eng_sts(e_min, e_max, rsp, etype) \ ({ \ u64 free_sts = 0, busy_sts = 0; \ typeof(rsp) _rsp = rsp; \ u32 e, i; \ \ for (e = (e_min), i = 0; e < (e_max); e++, i++) { \ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e)); \ if (reg & 0x1) \ busy_sts |= 1ULL << i; \ \ if (reg & 0x2) \ free_sts |= 1ULL << i; \ } \ (_rsp)->busy_sts_##etype = busy_sts; \ (_rsp)->free_sts_##etype = free_sts; \ }) static irqreturn_t cpt_af_flt_intr_handler(int vec, void *ptr) { struct rvu_block *block = ptr; struct rvu *rvu = block->rvu; int blkaddr = block->addr; u64 reg, val; int i, eng; u8 grp; reg = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(vec)); dev_err_ratelimited(rvu->dev, "Received CPTAF FLT%d irq : 0x%llx", vec, reg); i = -1; while ((i = find_next_bit((unsigned long *)&reg, 64, i + 1)) < 64) { switch (vec) { case 0: eng = i; break; case 1: eng = i + 64; break; case 2: eng = i + 128; break; } grp = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng)) & 0xFF; /* Disable and enable the engine which triggers fault */ rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), 0x0); val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng)); rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val & ~1ULL); rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), grp); rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val | 1ULL); spin_lock(&rvu->cpt_intr_lock); block->cpt_flt_eng_map[vec] |= BIT_ULL(i); val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(eng)); val = val & 0x3; if (val == 0x1 || val == 0x2) block->cpt_rcvrd_eng_map[vec] |= BIT_ULL(i); spin_unlock(&rvu->cpt_intr_lock); } rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(vec), reg); return IRQ_HANDLED; } static irqreturn_t rvu_cpt_af_flt0_intr_handler(int irq, void *ptr) { return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT0, ptr); } static irqreturn_t rvu_cpt_af_flt1_intr_handler(int irq, void *ptr) { return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT1, ptr); } static irqreturn_t rvu_cpt_af_flt2_intr_handler(int irq, void *ptr) { return cpt_af_flt_intr_handler(CPT_10K_AF_INT_VEC_FLT2, ptr); } static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr) { struct rvu_block *block = ptr; struct rvu *rvu = block->rvu; int blkaddr = block->addr; u64 reg; reg = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT); dev_err_ratelimited(rvu->dev, "Received CPTAF RVU irq : 0x%llx", reg); rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT, reg); return IRQ_HANDLED; } static irqreturn_t rvu_cpt_af_ras_intr_handler(int irq, void *ptr) { struct rvu_block *block = ptr; struct rvu *rvu = block->rvu; int blkaddr = block->addr; u64 reg; reg = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT); dev_err_ratelimited(rvu->dev, "Received CPTAF RAS irq : 0x%llx", reg); rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT, reg); return IRQ_HANDLED; } static int rvu_cpt_do_register_interrupt(struct rvu_block *block, int irq_offs, irq_handler_t handler, const char *name) { struct rvu *rvu = block->rvu; int ret; ret = request_irq(pci_irq_vector(rvu->pdev, irq_offs), handler, 0, name, block); if (ret) { dev_err(rvu->dev, "RVUAF: %s irq registration failed", name); return ret; } WARN_ON(rvu->irq_allocated[irq_offs]); 
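	/* Remember that this vector is now in use so the CPT unregister path will free it */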
rvu->irq_allocated[irq_offs] = true; return 0; } static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off) { struct rvu *rvu = block->rvu; int blkaddr = block->addr; int i; /* Disable all CPT AF interrupts */ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(0), ~0ULL); rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(1), ~0ULL); rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(2), 0xFFFF); rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1); rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1); for (i = 0; i < CPT_10K_AF_INT_VEC_CNT; i++) if (rvu->irq_allocated[off + i]) { free_irq(pci_irq_vector(rvu->pdev, off + i), block); rvu->irq_allocated[off + i] = false; } } static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr) { struct rvu_hwinfo *hw = rvu->hw; struct rvu_block *block; int i, offs; if (!is_block_implemented(rvu->hw, blkaddr)) return; offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF; if (!offs) { dev_warn(rvu->dev, "Failed to get CPT_AF_INT vector offsets\n"); return; } block = &hw->block[blkaddr]; if (!is_rvu_otx2(rvu)) return cpt_10k_unregister_interrupts(block, offs); /* Disable all CPT AF interrupts */ for (i = 0; i < CPT_AF_INT_VEC_RVU; i++) rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), ~0ULL); rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1); rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1); for (i = 0; i < CPT_AF_INT_VEC_CNT; i++) if (rvu->irq_allocated[offs + i]) { free_irq(pci_irq_vector(rvu->pdev, offs + i), block); rvu->irq_allocated[offs + i] = false; } } void rvu_cpt_unregister_interrupts(struct rvu *rvu) { cpt_unregister_interrupts(rvu, BLKADDR_CPT0); cpt_unregister_interrupts(rvu, BLKADDR_CPT1); } static int cpt_10k_register_interrupts(struct rvu_block *block, int off) { struct rvu *rvu = block->rvu; int blkaddr = block->addr; irq_handler_t flt_fn; int i, ret; for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) { sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i); switch (i) { case CPT_10K_AF_INT_VEC_FLT0: flt_fn = rvu_cpt_af_flt0_intr_handler; break; case CPT_10K_AF_INT_VEC_FLT1: flt_fn = rvu_cpt_af_flt1_intr_handler; break; case CPT_10K_AF_INT_VEC_FLT2: flt_fn = rvu_cpt_af_flt2_intr_handler; break; } ret = rvu_cpt_do_register_interrupt(block, off + i, flt_fn, &rvu->irq_name[(off + i) * NAME_SIZE]); if (ret) goto err; if (i == CPT_10K_AF_INT_VEC_FLT2) rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0xFFFF); else rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL); } ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU, rvu_cpt_af_rvu_intr_handler, "CPTAF RVU"); if (ret) goto err; rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1); ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RAS, rvu_cpt_af_ras_intr_handler, "CPTAF RAS"); if (ret) goto err; rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1); return 0; err: rvu_cpt_unregister_interrupts(rvu); return ret; } static int cpt_register_interrupts(struct rvu *rvu, int blkaddr) { struct rvu_hwinfo *hw = rvu->hw; struct rvu_block *block; irq_handler_t flt_fn; int i, offs, ret = 0; if (!is_block_implemented(rvu->hw, blkaddr)) return 0; block = &hw->block[blkaddr]; offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF; if (!offs) { dev_warn(rvu->dev, "Failed to get CPT_AF_INT vector offsets\n"); return 0; } if (!is_rvu_otx2(rvu)) return cpt_10k_register_interrupts(block, offs); for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) { 
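		/* For each FLT vector: build its name, pick the matching handler, register it and enable the interrupt */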
sprintf(&rvu->irq_name[(offs + i) * NAME_SIZE], "CPTAF FLT%d", i); switch (i) { case CPT_AF_INT_VEC_FLT0: flt_fn = rvu_cpt_af_flt0_intr_handler; break; case CPT_AF_INT_VEC_FLT1: flt_fn = rvu_cpt_af_flt1_intr_handler; break; } ret = rvu_cpt_do_register_interrupt(block, offs + i, flt_fn, &rvu->irq_name[(offs + i) * NAME_SIZE]); if (ret) goto err; rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL); } ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU, rvu_cpt_af_rvu_intr_handler, "CPTAF RVU"); if (ret) goto err; rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1); ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RAS, rvu_cpt_af_ras_intr_handler, "CPTAF RAS"); if (ret) goto err; rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1); return 0; err: rvu_cpt_unregister_interrupts(rvu); return ret; } int rvu_cpt_register_interrupts(struct rvu *rvu) { int ret; ret = cpt_register_interrupts(rvu, BLKADDR_CPT0); if (ret) return ret; return cpt_register_interrupts(rvu, BLKADDR_CPT1); } static int get_cpt_pf_num(struct rvu *rvu) { int i, domain_nr, cpt_pf_num = -1; struct pci_dev *pdev; domain_nr = pci_domain_nr(rvu->pdev->bus); for (i = 0; i < rvu->hw->total_pfs; i++) { pdev = pci_get_domain_bus_and_slot(domain_nr, i + 1, 0); if (!pdev) continue; if (pdev->device == PCI_DEVID_OTX2_CPT_PF || pdev->device == PCI_DEVID_OTX2_CPT10K_PF) { cpt_pf_num = i; put_device(&pdev->dev); break; } put_device(&pdev->dev); } return cpt_pf_num; } static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc) { int cpt_pf_num = rvu->cpt_pf_num; if (rvu_get_pf(pcifunc) != cpt_pf_num) return false; if (pcifunc & RVU_PFVF_FUNC_MASK) return false; return true; } static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc) { int cpt_pf_num = rvu->cpt_pf_num; if (rvu_get_pf(pcifunc) != cpt_pf_num) return false; if (!(pcifunc & RVU_PFVF_FUNC_MASK)) return false; return true; } static int validate_and_get_cpt_blkaddr(int req_blkaddr) { int blkaddr; blkaddr = req_blkaddr ? 
req_blkaddr : BLKADDR_CPT0; if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1) return -EINVAL; return blkaddr; } int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu, struct cpt_lf_alloc_req_msg *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; struct rvu_block *block; int cptlf, blkaddr; int num_lfs, slot; u64 val; blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); if (blkaddr < 0) return blkaddr; if (req->eng_grpmsk == 0x0) return CPT_AF_ERR_GRP_INVALID; block = &rvu->hw->block[blkaddr]; num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc), block->addr); if (!num_lfs) return CPT_AF_ERR_LF_INVALID; /* Check if requested 'CPTLF <=> NIXLF' mapping is valid */ if (req->nix_pf_func) { /* If default, use 'this' CPTLF's PFFUNC */ if (req->nix_pf_func == RVU_DEFAULT_PF_FUNC) req->nix_pf_func = pcifunc; if (!is_pffunc_map_valid(rvu, req->nix_pf_func, BLKTYPE_NIX)) return CPT_AF_ERR_NIX_PF_FUNC_INVALID; } /* Check if requested 'CPTLF <=> SSOLF' mapping is valid */ if (req->sso_pf_func) { /* If default, use 'this' CPTLF's PFFUNC */ if (req->sso_pf_func == RVU_DEFAULT_PF_FUNC) req->sso_pf_func = pcifunc; if (!is_pffunc_map_valid(rvu, req->sso_pf_func, BLKTYPE_SSO)) return CPT_AF_ERR_SSO_PF_FUNC_INVALID; } for (slot = 0; slot < num_lfs; slot++) { cptlf = rvu_get_lf(rvu, block, pcifunc, slot); if (cptlf < 0) return CPT_AF_ERR_LF_INVALID; /* Set CPT LF group and priority */ val = (u64)req->eng_grpmsk << 48 | 1; if (!is_rvu_otx2(rvu)) { if (req->ctx_ilen_valid) val |= (req->ctx_ilen << 17); else val |= (CPT_CTX_ILEN << 17); } rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val); /* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC. EXE_LDWB is set * on reset. */ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf)); val &= ~(GENMASK_ULL(63, 48) | GENMASK_ULL(47, 32)); val |= ((u64)req->nix_pf_func << 48 | (u64)req->sso_pf_func << 32); rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val); } return 0; } static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr) { u16 pcifunc = req->hdr.pcifunc; int num_lfs, cptlf, slot, err; struct rvu_block *block; block = &rvu->hw->block[blkaddr]; num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc), block->addr); if (!num_lfs) return 0; for (slot = 0; slot < num_lfs; slot++) { cptlf = rvu_get_lf(rvu, block, pcifunc, slot); if (cptlf < 0) return CPT_AF_ERR_LF_INVALID; /* Perform teardown */ rvu_cpt_lf_teardown(rvu, pcifunc, blkaddr, cptlf, slot); /* Reset LF */ err = rvu_lf_reset(rvu, block, cptlf); if (err) { dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n", block->addr, cptlf); } } return 0; } int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { int ret; ret = cpt_lf_free(rvu, req, BLKADDR_CPT0); if (ret) return ret; if (is_block_implemented(rvu->hw, BLKADDR_CPT1)) ret = cpt_lf_free(rvu, req, BLKADDR_CPT1); return ret; } static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf, struct cpt_inline_ipsec_cfg_msg *req) { u16 sso_pf_func = req->sso_pf_func; u8 nix_sel; u64 val; val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf)); if (req->enable && (val & BIT_ULL(16))) { /* IPSec inline outbound path is already enabled for a given * CPT LF, HRM states that inline inbound & outbound paths * must not be enabled at the same time for a given CPT LF */ return CPT_AF_ERR_INLINE_IPSEC_INB_ENA; } /* Check if requested 'CPTLF <=> SSOLF' mapping is valid */ if (sso_pf_func && !is_pffunc_map_valid(rvu, sso_pf_func, BLKTYPE_SSO)) return CPT_AF_ERR_SSO_PF_FUNC_INVALID; nix_sel = 
(blkaddr == BLKADDR_CPT1) ? 1 : 0; /* Enable CPT LF for IPsec inline inbound operations */ if (req->enable) val |= BIT_ULL(9); else val &= ~BIT_ULL(9); val |= (u64)nix_sel << 8; rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val); if (sso_pf_func) { /* Set SSO_PF_FUNC */ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf)); val |= (u64)sso_pf_func << 32; val |= (u64)req->nix_pf_func << 48; rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val); } if (req->sso_pf_func_ovrd) /* Set SSO_PF_FUNC_OVRD for inline IPSec */ rvu_write64(rvu, blkaddr, CPT_AF_ECO, 0x1); /* Configure the X2P Link register with the cpt base channel number and * range of channels it should propagate to X2P */ if (!is_rvu_otx2(rvu)) { val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16); val |= (u64)rvu->hw->cpt_chan_base; rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val); rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val); } return 0; } static int cpt_inline_ipsec_cfg_outbound(struct rvu *rvu, int blkaddr, u8 cptlf, struct cpt_inline_ipsec_cfg_msg *req) { u16 nix_pf_func = req->nix_pf_func; int nix_blkaddr; u8 nix_sel; u64 val; val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf)); if (req->enable && (val & BIT_ULL(9))) { /* IPSec inline inbound path is already enabled for a given * CPT LF, HRM states that inline inbound & outbound paths * must not be enabled at the same time for a given CPT LF */ return CPT_AF_ERR_INLINE_IPSEC_OUT_ENA; } /* Check if requested 'CPTLF <=> NIXLF' mapping is valid */ if (nix_pf_func && !is_pffunc_map_valid(rvu, nix_pf_func, BLKTYPE_NIX)) return CPT_AF_ERR_NIX_PF_FUNC_INVALID; /* Enable CPT LF for IPsec inline outbound operations */ if (req->enable) val |= BIT_ULL(16); else val &= ~BIT_ULL(16); rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val); if (nix_pf_func) { /* Set NIX_PF_FUNC */ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf)); val |= (u64)nix_pf_func << 48; rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val); nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, nix_pf_func); nix_sel = (nix_blkaddr == BLKADDR_NIX0) ? 
0 : 1; val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf)); val |= (u64)nix_sel << 8; rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val); } return 0; } int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu, struct cpt_inline_ipsec_cfg_msg *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; struct rvu_block *block; int cptlf, blkaddr, ret; u16 actual_slot; blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc, req->slot, &actual_slot); if (blkaddr < 0) return CPT_AF_ERR_LF_INVALID; block = &rvu->hw->block[blkaddr]; cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot); if (cptlf < 0) return CPT_AF_ERR_LF_INVALID; switch (req->dir) { case CPT_INLINE_INBOUND: ret = cpt_inline_ipsec_cfg_inbound(rvu, blkaddr, cptlf, req); break; case CPT_INLINE_OUTBOUND: ret = cpt_inline_ipsec_cfg_outbound(rvu, blkaddr, cptlf, req); break; default: return CPT_AF_ERR_PARAM; } return ret; } static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req) { u64 offset = req->reg_offset; int blkaddr, num_lfs, lf; struct rvu_block *block; struct rvu_pfvf *pfvf; blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); if (blkaddr < 0) return false; /* Registers that can be accessed from PF/VF */ if ((offset & 0xFF000) == CPT_AF_LFX_CTL(0) || (offset & 0xFF000) == CPT_AF_LFX_CTL2(0)) { if (offset & 7) return false; lf = (offset & 0xFFF) >> 3; block = &rvu->hw->block[blkaddr]; pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr); if (lf >= num_lfs) /* Slot is not valid for that PF/VF */ return false; /* Translate local LF used by VFs to global CPT LF */ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], req->hdr.pcifunc, lf); if (lf < 0) return false; return true; } else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) { /* Registers that can be accessed from PF */ switch (offset) { case CPT_AF_DIAG: case CPT_AF_CTL: case CPT_AF_PF_FUNC: case CPT_AF_BLK_RST: case CPT_AF_CONSTANTS1: case CPT_AF_CTX_FLUSH_TIMER: return true; } switch (offset & 0xFF000) { case CPT_AF_EXEX_STS(0): case CPT_AF_EXEX_CTL(0): case CPT_AF_EXEX_CTL2(0): case CPT_AF_EXEX_UCODE_BASE(0): if (offset & 7) return false; break; default: return false; } return true; } return false; } int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req, struct cpt_rd_wr_reg_msg *rsp) { int blkaddr; blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); if (blkaddr < 0) return blkaddr; /* This message is accepted only if sent from CPT PF/VF */ if (!is_cpt_pf(rvu, req->hdr.pcifunc) && !is_cpt_vf(rvu, req->hdr.pcifunc)) return CPT_AF_ERR_ACCESS_DENIED; rsp->reg_offset = req->reg_offset; rsp->ret_val = req->ret_val; rsp->is_write = req->is_write; if (!is_valid_offset(rvu, req)) return CPT_AF_ERR_ACCESS_DENIED; if (req->is_write) rvu_write64(rvu, blkaddr, req->reg_offset, req->val); else rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset); return 0; } static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr) { if (is_rvu_otx2(rvu)) return; rsp->ctx_mis_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_MIS_PC); rsp->ctx_hit_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_HIT_PC); rsp->ctx_aop_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_AOP_PC); rsp->ctx_aop_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_AOP_LATENCY_PC); rsp->ctx_ifetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_IFETCH_PC); rsp->ctx_ifetch_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_IFETCH_LATENCY_PC); rsp->ctx_ffetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC); rsp->ctx_ffetch_lat_pc = rvu_read64(rvu, 
blkaddr, CPT_AF_CTX_FFETCH_LATENCY_PC); rsp->ctx_wback_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC); rsp->ctx_wback_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_LATENCY_PC); rsp->ctx_psh_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC); rsp->ctx_psh_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_LATENCY_PC); rsp->ctx_err = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ERR); rsp->ctx_enc_id = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ENC_ID); rsp->ctx_flush_timer = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FLUSH_TIMER); rsp->rxc_time = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME); rsp->rxc_time_cfg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG); rsp->rxc_active_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS); rsp->rxc_zombie_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS); rsp->rxc_dfrg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG); rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0)); rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1)); } static void get_eng_sts(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr) { u16 max_ses, max_ies, max_aes; u32 e_min = 0, e_max = 0; u64 reg; reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1); max_ses = reg & 0xffff; max_ies = (reg >> 16) & 0xffff; max_aes = (reg >> 32) & 0xffff; /* Get AE status */ e_min = max_ses + max_ies; e_max = max_ses + max_ies + max_aes; cpt_get_eng_sts(e_min, e_max, rsp, ae); /* Get SE status */ e_min = 0; e_max = max_ses; cpt_get_eng_sts(e_min, e_max, rsp, se); /* Get IE status */ e_min = max_ses; e_max = max_ses + max_ies; cpt_get_eng_sts(e_min, e_max, rsp, ie); } int rvu_mbox_handler_cpt_sts(struct rvu *rvu, struct cpt_sts_req *req, struct cpt_sts_rsp *rsp) { int blkaddr; blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); if (blkaddr < 0) return blkaddr; /* This message is accepted only if sent from CPT PF/VF */ if (!is_cpt_pf(rvu, req->hdr.pcifunc) && !is_cpt_vf(rvu, req->hdr.pcifunc)) return CPT_AF_ERR_ACCESS_DENIED; get_ctx_pc(rvu, rsp, blkaddr); /* Get CPT engines status */ get_eng_sts(rvu, rsp, blkaddr); /* Read CPT instruction PC registers */ rsp->inst_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC); rsp->inst_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC); rsp->rd_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC); rsp->rd_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC); rsp->rd_uc_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC); rsp->active_cycles_pc = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC); rsp->exe_err_info = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO); rsp->cptclk_cnt = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT); rsp->diag = rvu_read64(rvu, blkaddr, CPT_AF_DIAG); return 0; } #define RXC_ZOMBIE_THRES GENMASK_ULL(59, 48) #define RXC_ZOMBIE_LIMIT GENMASK_ULL(43, 32) #define RXC_ACTIVE_THRES GENMASK_ULL(27, 16) #define RXC_ACTIVE_LIMIT GENMASK_ULL(11, 0) #define RXC_ACTIVE_COUNT GENMASK_ULL(60, 48) #define RXC_ZOMBIE_COUNT GENMASK_ULL(60, 48) static void cpt_rxc_time_cfg(struct rvu *rvu, struct cpt_rxc_time_cfg_req *req, int blkaddr, struct cpt_rxc_time_cfg_req *save) { u64 dfrg_reg; if (save) { /* Save older config */ dfrg_reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG); save->zombie_thres = FIELD_GET(RXC_ZOMBIE_THRES, dfrg_reg); save->zombie_limit = FIELD_GET(RXC_ZOMBIE_LIMIT, dfrg_reg); save->active_thres = FIELD_GET(RXC_ACTIVE_THRES, dfrg_reg); save->active_limit = FIELD_GET(RXC_ACTIVE_LIMIT, dfrg_reg); save->step = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG); } dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, req->zombie_thres); 
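	/* Pack the remaining thresholds/limits, then program the step and defrag registers with the new timing config */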
dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_LIMIT, req->zombie_limit); dfrg_reg |= FIELD_PREP(RXC_ACTIVE_THRES, req->active_thres); dfrg_reg |= FIELD_PREP(RXC_ACTIVE_LIMIT, req->active_limit); rvu_write64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG, req->step); rvu_write64(rvu, blkaddr, CPT_AF_RXC_DFRG, dfrg_reg); } int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu, struct cpt_rxc_time_cfg_req *req, struct msg_rsp *rsp) { int blkaddr; blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); if (blkaddr < 0) return blkaddr; /* This message is accepted only if sent from CPT PF/VF */ if (!is_cpt_pf(rvu, req->hdr.pcifunc) && !is_cpt_vf(rvu, req->hdr.pcifunc)) return CPT_AF_ERR_ACCESS_DENIED; cpt_rxc_time_cfg(rvu, req, blkaddr, NULL); return 0; } int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc); } int rvu_mbox_handler_cpt_lf_reset(struct rvu *rvu, struct cpt_lf_rst_req *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; struct rvu_block *block; int cptlf, blkaddr, ret; u16 actual_slot; u64 ctl, ctl2; blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc, req->slot, &actual_slot); if (blkaddr < 0) return CPT_AF_ERR_LF_INVALID; block = &rvu->hw->block[blkaddr]; cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot); if (cptlf < 0) return CPT_AF_ERR_LF_INVALID; ctl = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf)); ctl2 = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf)); ret = rvu_lf_reset(rvu, block, cptlf); if (ret) dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n", block->addr, cptlf); rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), ctl); rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), ctl2); return 0; } int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_req *req, struct cpt_flt_eng_info_rsp *rsp) { struct rvu_block *block; unsigned long flags; int blkaddr, vec; blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); if (blkaddr < 0) return blkaddr; block = &rvu->hw->block[blkaddr]; for (vec = 0; vec < CPT_10K_AF_INT_VEC_RVU; vec++) { spin_lock_irqsave(&rvu->cpt_intr_lock, flags); rsp->flt_eng_map[vec] = block->cpt_flt_eng_map[vec]; rsp->rcvrd_eng_map[vec] = block->cpt_rcvrd_eng_map[vec]; if (req->reset) { block->cpt_flt_eng_map[vec] = 0x0; block->cpt_rcvrd_eng_map[vec] = 0x0; } spin_unlock_irqrestore(&rvu->cpt_intr_lock, flags); } return 0; } static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr) { struct cpt_rxc_time_cfg_req req, prev; int timeout = 2000; u64 reg; if (is_rvu_otx2(rvu)) return; /* Set time limit to minimum values, so that rxc entries will be * flushed out quickly. 
*/ req.step = 1; req.zombie_thres = 1; req.zombie_limit = 1; req.active_thres = 1; req.active_limit = 1; cpt_rxc_time_cfg(rvu, &req, blkaddr, &prev); do { reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS); udelay(1); if (FIELD_GET(RXC_ACTIVE_COUNT, reg)) timeout--; else break; } while (timeout); if (timeout == 0) dev_warn(rvu->dev, "Poll for RXC active count hits hard loop counter\n"); timeout = 2000; do { reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS); udelay(1); if (FIELD_GET(RXC_ZOMBIE_COUNT, reg)) timeout--; else break; } while (timeout); if (timeout == 0) dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n"); /* Restore config */ cpt_rxc_time_cfg(rvu, &prev, blkaddr, NULL); } #define INFLIGHT GENMASK_ULL(8, 0) #define GRB_CNT GENMASK_ULL(39, 32) #define GWB_CNT GENMASK_ULL(47, 40) #define XQ_XOR GENMASK_ULL(63, 63) #define DQPTR GENMASK_ULL(19, 0) #define NQPTR GENMASK_ULL(51, 32) static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot) { int timeout = 1000000; u64 inprog, inst_ptr; u64 qsize, pending; int i = 0; /* Disable instructions enqueuing */ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTL), 0x0); inprog = rvu_read64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG)); inprog |= BIT_ULL(16); rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), inprog); qsize = rvu_read64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_Q_SIZE)) & 0x7FFF; do { inst_ptr = rvu_read64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_Q_INST_PTR)); pending = (FIELD_GET(XQ_XOR, inst_ptr) * qsize * 40) + FIELD_GET(NQPTR, inst_ptr) - FIELD_GET(DQPTR, inst_ptr); udelay(1); timeout--; } while ((pending != 0) && (timeout != 0)); if (timeout == 0) dev_warn(rvu->dev, "TIMEOUT: CPT poll on pending instructions\n"); timeout = 1000000; /* Wait for CPT queue to become execution-quiescent */ do { inprog = rvu_read64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG)); if ((FIELD_GET(INFLIGHT, inprog) == 0) && (FIELD_GET(GRB_CNT, inprog) == 0)) { i++; } else { i = 0; timeout--; } } while ((timeout != 0) && (i < 10)); if (timeout == 0) dev_warn(rvu->dev, "TIMEOUT: CPT poll on inflight count\n"); /* Wait for 2 us to flush all queue writes to memory */ udelay(2); } int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot) { u64 reg; if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc)) cpt_rxc_teardown(rvu, blkaddr); mutex_lock(&rvu->alias_lock); /* Enable BAR2 ALIAS for this pcifunc. 
*/ reg = BIT_ULL(16) | pcifunc; rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg); cpt_lf_disable_iqueue(rvu, blkaddr, slot); rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0); mutex_unlock(&rvu->alias_lock); return 0; } #define CPT_RES_LEN 16 #define CPT_SE_IE_EGRP 1ULL static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr, int nix_blkaddr) { int cpt_pf_num = rvu->cpt_pf_num; struct cpt_inst_lmtst_req *req; dma_addr_t res_daddr; int timeout = 3000; u8 cpt_idx; u64 *inst; u16 *res; int rc; res = kzalloc(CPT_RES_LEN, GFP_KERNEL); if (!res) return -ENOMEM; res_daddr = dma_map_single(rvu->dev, res, CPT_RES_LEN, DMA_BIDIRECTIONAL); if (dma_mapping_error(rvu->dev, res_daddr)) { dev_err(rvu->dev, "DMA mapping failed for CPT result\n"); rc = -EFAULT; goto res_free; } *res = 0xFFFF; /* Send mbox message to CPT PF */ req = (struct cpt_inst_lmtst_req *) otx2_mbox_alloc_msg_rsp(&rvu->afpf_wq_info.mbox_up, cpt_pf_num, sizeof(*req), sizeof(struct msg_rsp)); if (!req) { rc = -ENOMEM; goto res_daddr_unmap; } req->hdr.sig = OTX2_MBOX_REQ_SIG; req->hdr.id = MBOX_MSG_CPT_INST_LMTST; inst = req->inst; /* Prepare CPT_INST_S */ inst[0] = 0; inst[1] = res_daddr; /* AF PF FUNC */ inst[2] = 0; /* Set QORD */ inst[3] = 1; inst[4] = 0; inst[5] = 0; inst[6] = 0; /* Set EGRP */ inst[7] = CPT_SE_IE_EGRP << 61; /* Subtract 1 from the NIX-CPT credit count to preserve * credit counts. */ cpt_idx = (blkaddr == BLKADDR_CPT0) ? 0 : 1; rvu_write64(rvu, nix_blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), BIT_ULL(22) - 1); otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, cpt_pf_num); rc = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, cpt_pf_num); if (rc) dev_warn(rvu->dev, "notification to pf %d failed\n", cpt_pf_num); /* Wait for CPT instruction to be completed */ do { mdelay(1); if (*res == 0xFFFF) timeout--; else break; } while (timeout); if (timeout == 0) dev_warn(rvu->dev, "Poll for result hits hard loop counter\n"); res_daddr_unmap: dma_unmap_single(rvu->dev, res_daddr, CPT_RES_LEN, DMA_BIDIRECTIONAL); res_free: kfree(res); return 0; } #define CTX_CAM_PF_FUNC GENMASK_ULL(61, 46) #define CTX_CAM_CPTR GENMASK_ULL(45, 0) int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc) { int nix_blkaddr, blkaddr; u16 max_ctx_entries, i; int slot = 0, num_lfs; u64 reg, cam_data; int rc; nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (nix_blkaddr < 0) return -EINVAL; if (is_rvu_otx2(rvu)) return 0; blkaddr = (nix_blkaddr == BLKADDR_NIX1) ? BLKADDR_CPT1 : BLKADDR_CPT0; /* Submit CPT_INST_S to track when all packets have been * flushed through for the NIX PF FUNC in inline inbound case. */ rc = cpt_inline_inb_lf_cmd_send(rvu, blkaddr, nix_blkaddr); if (rc) return rc; /* Wait for rxc entries to be flushed out */ cpt_rxc_teardown(rvu, blkaddr); reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0); max_ctx_entries = (reg >> 48) & 0xFFF; mutex_lock(&rvu->rsrc_lock); num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc), blkaddr); if (num_lfs == 0) { dev_warn(rvu->dev, "CPT LF is not configured\n"); goto unlock; } /* Enable BAR2 ALIAS for this pcifunc. 
*/ reg = BIT_ULL(16) | pcifunc; rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg); for (i = 0; i < max_ctx_entries; i++) { cam_data = rvu_read64(rvu, blkaddr, CPT_AF_CTX_CAM_DATA(i)); if ((FIELD_GET(CTX_CAM_PF_FUNC, cam_data) == pcifunc) && FIELD_GET(CTX_CAM_CPTR, cam_data)) { reg = BIT_ULL(46) | FIELD_GET(CTX_CAM_CPTR, cam_data); rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTX_FLUSH), reg); } } rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0); unlock: mutex_unlock(&rvu->rsrc_lock); return 0; } int rvu_cpt_init(struct rvu *rvu) { /* Retrieve CPT PF number */ rvu->cpt_pf_num = get_cpt_pf_num(rvu); spin_lock_init(&rvu->cpt_intr_lock); return 0; }
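/* Hypothetical sketch, not part of the upstream driver: unpacks the per-type
 * engine counts from CPT_AF_CONSTANTS1 using the same field layout that
 * get_eng_sts() above relies on. The helper name is an assumption for
 * illustration only.
 */
static void __maybe_unused cpt_get_eng_counts_sketch(struct rvu *rvu,
						     int blkaddr, u16 *max_ses,
						     u16 *max_ies, u16 *max_aes)
{
	u64 reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);

	*max_ses = reg & 0xffff;		/* symmetric engines */
	*max_ies = (reg >> 16) & 0xffff;	/* IPsec engines */
	*max_aes = (reg >> 32) & 0xffff;	/* asymmetric engines */
}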
linux-master
drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
// SPDX-License-Identifier: GPL-2.0 /* Marvell PTP driver * * Copyright (C) 2020 Marvell. * */ #include <linux/bitfield.h> #include <linux/device.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/hrtimer.h> #include <linux/ktime.h> #include "mbox.h" #include "ptp.h" #include "rvu.h" #define DRV_NAME "Marvell PTP Driver" #define PCI_DEVID_OCTEONTX2_PTP 0xA00C #define PCI_SUBSYS_DEVID_OCTX2_98xx_PTP 0xB100 #define PCI_SUBSYS_DEVID_OCTX2_96XX_PTP 0xB200 #define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP 0xB300 #define PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP 0xB400 #define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP 0xB500 #define PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP 0xB600 #define PCI_DEVID_OCTEONTX2_RST 0xA085 #define PCI_DEVID_CN10K_PTP 0xA09E #define PCI_SUBSYS_DEVID_CN10K_A_PTP 0xB900 #define PCI_SUBSYS_DEVID_CNF10K_A_PTP 0xBA00 #define PCI_SUBSYS_DEVID_CNF10K_B_PTP 0xBC00 #define PCI_PTP_BAR_NO 0 #define PTP_CLOCK_CFG 0xF00ULL #define PTP_CLOCK_CFG_PTP_EN BIT_ULL(0) #define PTP_CLOCK_CFG_EXT_CLK_EN BIT_ULL(1) #define PTP_CLOCK_CFG_EXT_CLK_IN_MASK GENMASK_ULL(7, 2) #define PTP_CLOCK_CFG_TSTMP_EDGE BIT_ULL(9) #define PTP_CLOCK_CFG_TSTMP_EN BIT_ULL(8) #define PTP_CLOCK_CFG_TSTMP_IN_MASK GENMASK_ULL(15, 10) #define PTP_CLOCK_CFG_ATOMIC_OP_MASK GENMASK_ULL(28, 26) #define PTP_CLOCK_CFG_PPS_EN BIT_ULL(30) #define PTP_CLOCK_CFG_PPS_INV BIT_ULL(31) #define PTP_PPS_HI_INCR 0xF60ULL #define PTP_PPS_LO_INCR 0xF68ULL #define PTP_PPS_THRESH_HI 0xF58ULL #define PTP_CLOCK_LO 0xF08ULL #define PTP_CLOCK_HI 0xF10ULL #define PTP_CLOCK_COMP 0xF18ULL #define PTP_TIMESTAMP 0xF20ULL #define PTP_CLOCK_SEC 0xFD0ULL #define PTP_SEC_ROLLOVER 0xFD8ULL /* Atomic update related CSRs */ #define PTP_FRNS_TIMESTAMP 0xFE0ULL #define PTP_NXT_ROLLOVER_SET 0xFE8ULL #define PTP_CURR_ROLLOVER_SET 0xFF0ULL #define PTP_NANO_TIMESTAMP 0xFF8ULL #define PTP_SEC_TIMESTAMP 0x1000ULL #define CYCLE_MULT 1000 #define is_rev_A0(ptp) (((ptp)->pdev->revision & 0x0F) == 0x0) #define is_rev_A1(ptp) (((ptp)->pdev->revision & 0x0F) == 0x1) /* PTP atomic update operation type */ enum atomic_opcode { ATOMIC_SET = 1, ATOMIC_INC = 3, ATOMIC_DEC = 4 }; static struct ptp *first_ptp_block; static const struct pci_device_id ptp_id_table[]; static bool is_ptp_dev_cnf10ka(struct ptp *ptp) { return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP; } static bool is_ptp_dev_cn10ka(struct ptp *ptp) { return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP; } static bool cn10k_ptp_errata(struct ptp *ptp) { if ((is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) && (is_rev_A0(ptp) || is_rev_A1(ptp))) return true; return false; } static bool is_tstmp_atomic_update_supported(struct rvu *rvu) { struct ptp *ptp = rvu->ptp; if (is_rvu_otx2(rvu)) return false; /* On older silicon variants of CN10K, atomic update feature * is not available. */ if ((is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) && (is_rev_A0(ptp) || is_rev_A1(ptp))) return false; return true; } static enum hrtimer_restart ptp_reset_thresh(struct hrtimer *hrtimer) { struct ptp *ptp = container_of(hrtimer, struct ptp, hrtimer); ktime_t curr_ts = ktime_get(); ktime_t delta_ns, period_ns; u64 ptp_clock_hi; /* calculate the elapsed time since last restart */ delta_ns = ktime_to_ns(ktime_sub(curr_ts, ptp->last_ts)); /* if the ptp clock value has crossed 0.5 seconds, * its too late to update pps threshold value, so * update threshold after 1 second. 
*/ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI); if (ptp_clock_hi > 500000000) { period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - ptp_clock_hi)); } else { writeq(500000000, ptp->reg_base + PTP_PPS_THRESH_HI); period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - delta_ns)); } hrtimer_forward_now(hrtimer, period_ns); ptp->last_ts = curr_ts; return HRTIMER_RESTART; } static void ptp_hrtimer_start(struct ptp *ptp, ktime_t start_ns) { ktime_t period_ns; period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - start_ns)); hrtimer_start(&ptp->hrtimer, period_ns, HRTIMER_MODE_REL); ptp->last_ts = ktime_get(); } static u64 read_ptp_tstmp_sec_nsec(struct ptp *ptp) { u64 sec, sec1, nsec; unsigned long flags; spin_lock_irqsave(&ptp->ptp_lock, flags); sec = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL; nsec = readq(ptp->reg_base + PTP_CLOCK_HI); sec1 = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL; /* check nsec rollover */ if (sec1 > sec) { nsec = readq(ptp->reg_base + PTP_CLOCK_HI); sec = sec1; } spin_unlock_irqrestore(&ptp->ptp_lock, flags); return sec * NSEC_PER_SEC + nsec; } static u64 read_ptp_tstmp_nsec(struct ptp *ptp) { return readq(ptp->reg_base + PTP_CLOCK_HI); } static u64 ptp_calc_adjusted_comp(u64 ptp_clock_freq) { u64 comp, adj = 0, cycles_per_sec, ns_drift = 0; u32 ptp_clock_nsec, cycle_time; int cycle; /* Errata: * Issue #1: At the time of 1 sec rollover of the nano-second counter, * the nano-second counter is set to 0. However, it should be set to * (existing counter_value - 10^9). * * Issue #2: The nano-second counter rolls over at 0x3B9A_C9FF. * It should roll over at 0x3B9A_CA00. */ /* calculate ptp_clock_comp value */ comp = ((u64)1000000000ULL << 32) / ptp_clock_freq; /* use CYCLE_MULT to avoid accuracy loss due to integer arithmetic */ cycle_time = NSEC_PER_SEC * CYCLE_MULT / ptp_clock_freq; /* cycles per sec */ cycles_per_sec = ptp_clock_freq; /* check whether ptp nanosecond counter rolls over early */ cycle = cycles_per_sec - 1; ptp_clock_nsec = (cycle * comp) >> 32; while (ptp_clock_nsec < NSEC_PER_SEC) { if (ptp_clock_nsec == 0x3B9AC9FF) goto calc_adj_comp; cycle++; ptp_clock_nsec = (cycle * comp) >> 32; } /* compute nanoseconds lost per second when nsec counter rolls over */ ns_drift = ptp_clock_nsec - NSEC_PER_SEC; /* calculate ptp_clock_comp adjustment */ if (ns_drift > 0) { adj = comp * ns_drift; adj = adj / 1000000000ULL; } /* speed up the ptp clock to account for nanoseconds lost */ comp += adj; return comp; calc_adj_comp: /* slow down the ptp clock to not rollover early */ adj = comp * cycle_time; adj = adj / 1000000000ULL; adj = adj / CYCLE_MULT; comp -= adj; return comp; } struct ptp *ptp_get(void) { struct ptp *ptp = first_ptp_block; /* Check PTP block is present in hardware */ if (!pci_dev_present(ptp_id_table)) return ERR_PTR(-ENODEV); /* Check driver is bound to PTP block */ if (!ptp) ptp = ERR_PTR(-EPROBE_DEFER); else if (!IS_ERR(ptp)) pci_dev_get(ptp->pdev); return ptp; } void ptp_put(struct ptp *ptp) { if (!ptp) return; pci_dev_put(ptp->pdev); } static void ptp_atomic_update(struct ptp *ptp, u64 timestamp) { u64 regval, curr_rollover_set, nxt_rollover_set; /* First setup NSECs and SECs */ writeq(timestamp, ptp->reg_base + PTP_NANO_TIMESTAMP); writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP); writeq(timestamp / NSEC_PER_SEC, ptp->reg_base + PTP_SEC_TIMESTAMP); nxt_rollover_set = roundup(timestamp, NSEC_PER_SEC); curr_rollover_set = nxt_rollover_set - NSEC_PER_SEC; writeq(nxt_rollover_set, ptp->reg_base + PTP_NXT_ROLLOVER_SET); writeq(curr_rollover_set, 
ptp->reg_base + PTP_CURR_ROLLOVER_SET); /* Now, initiate atomic update */ regval = readq(ptp->reg_base + PTP_CLOCK_CFG); regval &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK; regval |= (ATOMIC_SET << 26); writeq(regval, ptp->reg_base + PTP_CLOCK_CFG); } static void ptp_atomic_adjtime(struct ptp *ptp, s64 delta) { bool neg_adj = false, atomic_inc_dec = false; u64 regval, ptp_clock_hi; if (delta < 0) { delta = -delta; neg_adj = true; } /* use atomic inc/dec when delta < 1 second */ if (delta < NSEC_PER_SEC) atomic_inc_dec = true; if (!atomic_inc_dec) { ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI); if (neg_adj) { if (ptp_clock_hi > delta) ptp_clock_hi -= delta; else ptp_clock_hi = delta - ptp_clock_hi; } else { ptp_clock_hi += delta; } ptp_atomic_update(ptp, ptp_clock_hi); } else { writeq(delta, ptp->reg_base + PTP_NANO_TIMESTAMP); writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP); /* initiate atomic inc/dec */ regval = readq(ptp->reg_base + PTP_CLOCK_CFG); regval &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK; regval |= neg_adj ? (ATOMIC_DEC << 26) : (ATOMIC_INC << 26); writeq(regval, ptp->reg_base + PTP_CLOCK_CFG); } } static int ptp_adjfine(struct ptp *ptp, long scaled_ppm) { bool neg_adj = false; u32 freq, freq_adj; u64 comp, adj; s64 ppb; if (scaled_ppm < 0) { neg_adj = true; scaled_ppm = -scaled_ppm; } /* The hardware adds the clock compensation value to the PTP clock * on every coprocessor clock cycle. Typical convention is that it * represent number of nanosecond betwen each cycle. In this * convention compensation value is in 64 bit fixed-point * representation where upper 32 bits are number of nanoseconds * and lower is fractions of nanosecond. * The scaled_ppm represent the ratio in "parts per million" by which * the compensation value should be corrected. * To calculate new compenstation value we use 64bit fixed point * arithmetic on following formula * comp = tbase + tbase * scaled_ppm / (1M * 2^16) * where tbase is the basic compensation value calculated * initialy in the probe function. */ /* convert scaled_ppm to ppb */ ppb = 1 + scaled_ppm; ppb *= 125; ppb >>= 13; if (cn10k_ptp_errata(ptp)) { /* calculate the new frequency based on ppb */ freq_adj = (ptp->clock_rate * ppb) / 1000000000ULL; freq = neg_adj ? ptp->clock_rate + freq_adj : ptp->clock_rate - freq_adj; comp = ptp_calc_adjusted_comp(freq); } else { comp = ((u64)1000000000ull << 32) / ptp->clock_rate; adj = comp * ppb; adj = div_u64(adj, 1000000000ull); comp = neg_adj ? 
comp - adj : comp + adj; } writeq(comp, ptp->reg_base + PTP_CLOCK_COMP); return 0; } static int ptp_get_clock(struct ptp *ptp, u64 *clk) { /* Return the current PTP clock */ *clk = ptp->read_ptp_tstmp(ptp); return 0; } void ptp_start(struct rvu *rvu, u64 sclk, u32 ext_clk_freq, u32 extts) { struct ptp *ptp = rvu->ptp; struct pci_dev *pdev; u64 clock_comp; u64 clock_cfg; if (!ptp) return; pdev = ptp->pdev; if (!sclk) { dev_err(&pdev->dev, "PTP input clock cannot be zero\n"); return; } /* sclk is in MHz */ ptp->clock_rate = sclk * 1000000; /* Program the seconds rollover value to 1 second */ if (is_tstmp_atomic_update_supported(rvu)) { writeq(0, ptp->reg_base + PTP_NANO_TIMESTAMP); writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP); writeq(0, ptp->reg_base + PTP_SEC_TIMESTAMP); writeq(0, ptp->reg_base + PTP_CURR_ROLLOVER_SET); writeq(0x3b9aca00, ptp->reg_base + PTP_NXT_ROLLOVER_SET); writeq(0x3b9aca00, ptp->reg_base + PTP_SEC_ROLLOVER); } /* Enable PTP clock */ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG); if (ext_clk_freq) { ptp->clock_rate = ext_clk_freq; /* Set GPIO as PTP clock source */ clock_cfg &= ~PTP_CLOCK_CFG_EXT_CLK_IN_MASK; clock_cfg |= PTP_CLOCK_CFG_EXT_CLK_EN; } if (extts) { clock_cfg |= PTP_CLOCK_CFG_TSTMP_EDGE; /* Set GPIO as timestamping source */ clock_cfg &= ~PTP_CLOCK_CFG_TSTMP_IN_MASK; clock_cfg |= PTP_CLOCK_CFG_TSTMP_EN; } clock_cfg |= PTP_CLOCK_CFG_PTP_EN; clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV; writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG); clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG); clock_cfg &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK; clock_cfg |= (ATOMIC_SET << 26); writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG); /* Set 50% duty cycle for 1Hz output */ writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR); writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR); if (cn10k_ptp_errata(ptp)) { /* The ptp_clock_hi rollsover to zero once clock cycle before it * reaches one second boundary. so, program the pps_lo_incr in * such a way that the pps threshold value comparison at one * second boundary will succeed and pps edge changes. After each * one second boundary, the hrtimer handler will be invoked and * reprograms the pps threshold value. 
*/ ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate; writeq((0x1dcd6500ULL - ptp->clock_period) << 32, ptp->reg_base + PTP_PPS_LO_INCR); } if (cn10k_ptp_errata(ptp)) clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate); else clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate; /* Initial compensation value to start the nanosecs counter */ writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP); } static int ptp_get_tstmp(struct ptp *ptp, u64 *clk) { u64 timestamp; if (is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) { timestamp = readq(ptp->reg_base + PTP_TIMESTAMP); *clk = (timestamp >> 32) * NSEC_PER_SEC + (timestamp & 0xFFFFFFFF); } else { *clk = readq(ptp->reg_base + PTP_TIMESTAMP); } return 0; } static int ptp_set_thresh(struct ptp *ptp, u64 thresh) { if (!cn10k_ptp_errata(ptp)) writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI); return 0; } static int ptp_extts_on(struct ptp *ptp, int on) { u64 ptp_clock_hi; if (cn10k_ptp_errata(ptp)) { if (on) { ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI); ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi); } else { if (hrtimer_active(&ptp->hrtimer)) hrtimer_cancel(&ptp->hrtimer); } } return 0; } static int ptp_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct ptp *ptp; int err; ptp = kzalloc(sizeof(*ptp), GFP_KERNEL); if (!ptp) { err = -ENOMEM; goto error; } ptp->pdev = pdev; err = pcim_enable_device(pdev); if (err) goto error_free; err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev)); if (err) goto error_free; ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO]; pci_set_drvdata(pdev, ptp); if (!first_ptp_block) first_ptp_block = ptp; spin_lock_init(&ptp->ptp_lock); if (cn10k_ptp_errata(ptp)) { ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec; hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ptp->hrtimer.function = ptp_reset_thresh; } else { ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec; } return 0; error_free: kfree(ptp); error: /* For `ptp_get()` we need to differentiate between the case * when the core has not tried to probe this device and the case when * the probe failed. In the later case we keep the error in * `dev->driver_data`. 
*/ pci_set_drvdata(pdev, ERR_PTR(err)); if (!first_ptp_block) first_ptp_block = ERR_PTR(err); return err; } static void ptp_remove(struct pci_dev *pdev) { struct ptp *ptp = pci_get_drvdata(pdev); u64 clock_cfg; if (IS_ERR_OR_NULL(ptp)) return; if (cn10k_ptp_errata(ptp) && hrtimer_active(&ptp->hrtimer)) hrtimer_cancel(&ptp->hrtimer); /* Disable PTP clock */ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG); clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN; writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG); kfree(ptp); } static const struct pci_device_id ptp_id_table[] = { { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_OCTX2_98xx_PTP) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_OCTX2_96XX_PTP) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_OCTX2_95XX_PTP) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP) }, { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_PTP) }, { 0, } }; struct pci_driver ptp_driver = { .name = DRV_NAME, .id_table = ptp_id_table, .probe = ptp_probe, .remove = ptp_remove, }; int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req, struct ptp_rsp *rsp) { int err = 0; /* This function is the PTP mailbox handler invoked by AF * consumers/netdev drivers via the mailbox mechanism. It is used by * the netdev driver to get the PTP clock and to set frequency * adjustments. Since the mailbox can be invoked without knowing * whether the driver is bound to a ptp device, the validation below * is needed as a first step. */ if (!rvu->ptp) return -ENODEV; switch (req->op) { case PTP_OP_ADJFINE: err = ptp_adjfine(rvu->ptp, req->scaled_ppm); break; case PTP_OP_GET_CLOCK: err = ptp_get_clock(rvu->ptp, &rsp->clk); break; case PTP_OP_GET_TSTMP: err = ptp_get_tstmp(rvu->ptp, &rsp->clk); break; case PTP_OP_SET_THRESH: err = ptp_set_thresh(rvu->ptp, req->thresh); break; case PTP_OP_EXTTS_ON: err = ptp_extts_on(rvu->ptp, req->extts_on); break; case PTP_OP_ADJTIME: ptp_atomic_adjtime(rvu->ptp, req->delta); break; case PTP_OP_SET_CLOCK: ptp_atomic_update(rvu->ptp, (u64)req->clk); break; default: err = -EINVAL; break; } return err; } int rvu_mbox_handler_ptp_get_cap(struct rvu *rvu, struct msg_req *req, struct ptp_get_cap_rsp *rsp) { if (!rvu->ptp) return -ENODEV; if (is_tstmp_atomic_update_supported(rvu)) rsp->cap |= PTP_CAP_HW_ATOMIC_UPDATE; else rsp->cap &= ~BIT_ULL_MASK(0); return 0; }
linux-master
drivers/net/ethernet/marvell/octeontx2/af/ptp.c
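The scaled_ppm arithmetic in ptp_adjfine() above is easier to follow with concrete numbers. The stand-alone user-space sketch below mirrors only the non-errata branch of that math; the 1 GHz clock rate and the +1 ppm input are hypothetical example values (the driver derives the real rate from the sclk value passed to ptp_start()), and main(), clock_rate and scaled_ppm here are illustrative names, not part of the driver.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t clock_rate = 1000000000ULL;    /* assumed 1 GHz coprocessor clock */
        long scaled_ppm = 65536;                /* +1 ppm in scaled_ppm units (ppm * 2^16) */
        int neg_adj = 0;
        int64_t ppb;
        uint64_t comp, adj;

        if (scaled_ppm < 0) {
                neg_adj = 1;
                scaled_ppm = -scaled_ppm;
        }

        /* scaled_ppm -> ppb, same "* 125 >> 13" trick as ptp_adjfine() */
        ppb = 1 + scaled_ppm;
        ppb *= 125;
        ppb >>= 13;

        /* base compensation: nanoseconds per coprocessor cycle in Q32.32 */
        comp = (1000000000ULL << 32) / clock_rate;
        adj = comp * (uint64_t)ppb;
        adj /= 1000000000ULL;
        comp = neg_adj ? comp - adj : comp + adj;

        printf("ppb=%lld comp=0x%llx\n",
               (long long)ppb, (unsigned long long)comp);
        return 0;
}

For the inputs above this prints ppb=1000 and comp=0x1000010c6, i.e. slightly more than one nanosecond per cycle in Q32.32, which is the value the driver would then write to PTP_CLOCK_COMP.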
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Admin Function driver * * Copyright (C) 2018 Marvell. * */ #include <linux/module.h> #include <linux/pci.h> #include "rvu_struct.h" #include "common.h" #include "mbox.h" #include "rvu.h" struct reg_range { u64 start; u64 end; }; struct hw_reg_map { u8 regblk; u8 num_ranges; u64 mask; #define MAX_REG_RANGES 8 struct reg_range range[MAX_REG_RANGES]; }; static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = { {NIX_TXSCH_LVL_SMQ, 2, 0xFFFF, {{0x0700, 0x0708}, {0x1400, 0x14C8} } }, {NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18}, {0x1200, 0x12E0} } }, {NIX_TXSCH_LVL_TL3, 4, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608}, {0x1610, 0x1618}, {0x1700, 0x17B0} } }, {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17B0} } }, {NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } }, }; bool rvu_check_valid_reg(int regmap, int regblk, u64 reg) { int idx; struct hw_reg_map *map; /* Only 64bit offsets */ if (reg & 0x07) return false; if (regmap == TXSCHQ_HWREGMAP) { if (regblk >= NIX_TXSCH_LVL_CNT) return false; map = &txsch_reg_map[regblk]; } else { return false; } /* Should never happen */ if (map->regblk != regblk) return false; reg &= map->mask; for (idx = 0; idx < map->num_ranges; idx++) { if (reg >= map->range[idx].start && reg < map->range[idx].end) return true; } return false; }
linux-master
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
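rvu_check_valid_reg() above is an alignment check followed by a linear scan over the {start, end} offset ranges of the requested scheduler level. The stand-alone sketch below reproduces that lookup shape using the two SMQ ranges from txsch_reg_map[]; reg_in_ranges() and example_ranges are illustrative names, and the mask step (reg &= map->mask) is omitted for brevity.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };

/* The two SMQ ranges from txsch_reg_map[] above. */
static const struct range example_ranges[] = {
        { 0x0700, 0x0708 },
        { 0x1400, 0x14C8 },
};

static bool reg_in_ranges(uint64_t reg)
{
        size_t i;

        /* only 64-bit aligned register offsets are accepted */
        if (reg & 0x07)
                return false;

        /* end is exclusive, matching the reg < range.end test in the driver */
        for (i = 0; i < sizeof(example_ranges) / sizeof(example_ranges[0]); i++)
                if (reg >= example_ranges[i].start && reg < example_ranges[i].end)
                        return true;
        return false;
}

int main(void)
{
        printf("0x0700 -> %d\n", reg_in_ranges(0x0700));        /* 1: inside the first range */
        printf("0x0703 -> %d\n", reg_in_ranges(0x0703));        /* 0: not 8-byte aligned */
        printf("0x2000 -> %d\n", reg_in_ranges(0x2000));        /* 0: outside every range */
        return 0;
}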
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Admin Function driver * * Copyright (C) 2018 Marvell. * */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/pci.h> #include <linux/sysfs.h> #include "cgx.h" #include "rvu.h" #include "rvu_reg.h" #include "ptp.h" #include "mcs.h" #include "rvu_trace.h" #include "rvu_npc_hash.h" #define DRV_NAME "rvu_af" #define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver" static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, struct rvu_block *block, int lf); static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, struct rvu_block *block, int lf); static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc); static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, int type, int num, void (mbox_handler)(struct work_struct *), void (mbox_up_handler)(struct work_struct *)); enum { TYPE_AFVF, TYPE_AFPF, }; /* Supported devices */ static const struct pci_device_id rvu_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) }, { 0, } /* end of table */ }; MODULE_AUTHOR("Sunil Goutham <[email protected]>"); MODULE_DESCRIPTION(DRV_STRING); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(pci, rvu_id_table); static char *mkex_profile; /* MKEX profile name */ module_param(mkex_profile, charp, 0000); MODULE_PARM_DESC(mkex_profile, "MKEX profile name string"); static char *kpu_profile; /* KPU profile name */ module_param(kpu_profile, charp, 0000); MODULE_PARM_DESC(kpu_profile, "KPU profile name string"); static void rvu_setup_hw_capabilities(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1; hw->cap.nix_fixed_txschq_mapping = false; hw->cap.nix_shaping = true; hw->cap.nix_tx_link_bp = true; hw->cap.nix_rx_multicast = true; hw->cap.nix_shaper_toggle_wait = false; hw->cap.npc_hash_extract = false; hw->cap.npc_exact_match_enabled = false; hw->rvu = rvu; if (is_rvu_pre_96xx_C0(rvu)) { hw->cap.nix_fixed_txschq_mapping = true; hw->cap.nix_txsch_per_cgx_lmac = 4; hw->cap.nix_txsch_per_lbk_lmac = 132; hw->cap.nix_txsch_per_sdp_lmac = 76; hw->cap.nix_shaping = false; hw->cap.nix_tx_link_bp = false; if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu)) hw->cap.nix_rx_multicast = false; } if (!is_rvu_pre_96xx_C0(rvu)) hw->cap.nix_shaper_toggle_wait = true; if (!is_rvu_otx2(rvu)) hw->cap.per_pf_mbox_regs = true; if (is_rvu_npc_hash_extract_en(rvu)) hw->cap.npc_hash_extract = true; } /* Poll a RVU block's register 'offset', for a 'zero' * or 'nonzero' at bits specified by 'mask' */ int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero) { unsigned long timeout = jiffies + usecs_to_jiffies(20000); bool twice = false; void __iomem *reg; u64 reg_val; reg = rvu->afreg_base + ((block << 28) | offset); again: reg_val = readq(reg); if (zero && !(reg_val & mask)) return 0; if (!zero && (reg_val & mask)) return 0; if (time_before(jiffies, timeout)) { usleep_range(1, 5); goto again; } /* In scenarios where CPU is scheduled out before checking * 'time_before' (above) and gets scheduled in such that * jiffies are beyond timeout value, then check again if HW is * done with the operation in the meantime. 
*/ if (!twice) { twice = true; goto again; } return -EBUSY; } int rvu_alloc_rsrc(struct rsrc_bmap *rsrc) { int id; if (!rsrc->bmap) return -EINVAL; id = find_first_zero_bit(rsrc->bmap, rsrc->max); if (id >= rsrc->max) return -ENOSPC; __set_bit(id, rsrc->bmap); return id; } int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc) { int start; if (!rsrc->bmap) return -EINVAL; start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0); if (start >= rsrc->max) return -ENOSPC; bitmap_set(rsrc->bmap, start, nrsrc); return start; } static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start) { if (!rsrc->bmap) return; if (start >= rsrc->max) return; bitmap_clear(rsrc->bmap, start, nrsrc); } bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc) { int start; if (!rsrc->bmap) return false; start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0); if (start >= rsrc->max) return false; return true; } void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id) { if (!rsrc->bmap) return; __clear_bit(id, rsrc->bmap); } int rvu_rsrc_free_count(struct rsrc_bmap *rsrc) { int used; if (!rsrc->bmap) return 0; used = bitmap_weight(rsrc->bmap, rsrc->max); return (rsrc->max - used); } bool is_rsrc_free(struct rsrc_bmap *rsrc, int id) { if (!rsrc->bmap) return false; return !test_bit(id, rsrc->bmap); } int rvu_alloc_bitmap(struct rsrc_bmap *rsrc) { rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max), sizeof(long), GFP_KERNEL); if (!rsrc->bmap) return -ENOMEM; return 0; } void rvu_free_bitmap(struct rsrc_bmap *rsrc) { kfree(rsrc->bmap); } /* Get block LF's HW index from a PF_FUNC's block slot number */ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot) { u16 match = 0; int lf; mutex_lock(&rvu->rsrc_lock); for (lf = 0; lf < block->lf.max; lf++) { if (block->fn_map[lf] == pcifunc) { if (slot == match) { mutex_unlock(&rvu->rsrc_lock); return lf; } match++; } } mutex_unlock(&rvu->rsrc_lock); return -ENODEV; } /* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E. * Some silicon variants of OcteonTX2 supports * multiple blocks of same type. * * @pcifunc has to be zero when no LF is yet attached. * * For a pcifunc if LFs are attached from multiple blocks of same type, then * return blkaddr of first encountered block. */ int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc) { int devnum, blkaddr = -ENODEV; u64 cfg, reg; bool is_pf; switch (blktype) { case BLKTYPE_NPC: blkaddr = BLKADDR_NPC; goto exit; case BLKTYPE_NPA: blkaddr = BLKADDR_NPA; goto exit; case BLKTYPE_NIX: /* For now assume NIX0 */ if (!pcifunc) { blkaddr = BLKADDR_NIX0; goto exit; } break; case BLKTYPE_SSO: blkaddr = BLKADDR_SSO; goto exit; case BLKTYPE_SSOW: blkaddr = BLKADDR_SSOW; goto exit; case BLKTYPE_TIM: blkaddr = BLKADDR_TIM; goto exit; case BLKTYPE_CPT: /* For now assume CPT0 */ if (!pcifunc) { blkaddr = BLKADDR_CPT0; goto exit; } break; } /* Check if this is a RVU PF or VF */ if (pcifunc & RVU_PFVF_FUNC_MASK) { is_pf = false; devnum = rvu_get_hwvf(rvu, pcifunc); } else { is_pf = true; devnum = rvu_get_pf(pcifunc); } /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or * 'BLKADDR_NIX1'. */ if (blktype == BLKTYPE_NIX) { reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) : RVU_PRIV_HWVFX_NIXX_CFG(0); cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); if (cfg) { blkaddr = BLKADDR_NIX0; goto exit; } reg = is_pf ? 
RVU_PRIV_PFX_NIXX_CFG(1) : RVU_PRIV_HWVFX_NIXX_CFG(1); cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); if (cfg) blkaddr = BLKADDR_NIX1; } if (blktype == BLKTYPE_CPT) { reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) : RVU_PRIV_HWVFX_CPTX_CFG(0); cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); if (cfg) { blkaddr = BLKADDR_CPT0; goto exit; } reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) : RVU_PRIV_HWVFX_CPTX_CFG(1); cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); if (cfg) blkaddr = BLKADDR_CPT1; } exit: if (is_block_implemented(rvu->hw, blkaddr)) return blkaddr; return -ENODEV; } static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf, struct rvu_block *block, u16 pcifunc, u16 lf, bool attach) { int devnum, num_lfs = 0; bool is_pf; u64 reg; if (lf >= block->lf.max) { dev_err(&rvu->pdev->dev, "%s: FATAL: LF %d is >= %s's max lfs i.e %d\n", __func__, lf, block->name, block->lf.max); return; } /* Check if this is for a RVU PF or VF */ if (pcifunc & RVU_PFVF_FUNC_MASK) { is_pf = false; devnum = rvu_get_hwvf(rvu, pcifunc); } else { is_pf = true; devnum = rvu_get_pf(pcifunc); } block->fn_map[lf] = attach ? pcifunc : 0; switch (block->addr) { case BLKADDR_NPA: pfvf->npalf = attach ? true : false; num_lfs = pfvf->npalf; break; case BLKADDR_NIX0: case BLKADDR_NIX1: pfvf->nixlf = attach ? true : false; num_lfs = pfvf->nixlf; break; case BLKADDR_SSO: attach ? pfvf->sso++ : pfvf->sso--; num_lfs = pfvf->sso; break; case BLKADDR_SSOW: attach ? pfvf->ssow++ : pfvf->ssow--; num_lfs = pfvf->ssow; break; case BLKADDR_TIM: attach ? pfvf->timlfs++ : pfvf->timlfs--; num_lfs = pfvf->timlfs; break; case BLKADDR_CPT0: attach ? pfvf->cptlfs++ : pfvf->cptlfs--; num_lfs = pfvf->cptlfs; break; case BLKADDR_CPT1: attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--; num_lfs = pfvf->cpt1_lfs; break; } reg = is_pf ? 
block->pf_lfcnt_reg : block->vf_lfcnt_reg; rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs); } inline int rvu_get_pf(u16 pcifunc) { return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; } void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf) { u64 cfg; /* Get numVFs attached to this PF and first HWVF */ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); if (numvfs) *numvfs = (cfg >> 12) & 0xFF; if (hwvf) *hwvf = cfg & 0xFFF; } int rvu_get_hwvf(struct rvu *rvu, int pcifunc) { int pf, func; u64 cfg; pf = rvu_get_pf(pcifunc); func = pcifunc & RVU_PFVF_FUNC_MASK; /* Get first HWVF attached to this PF */ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); return ((cfg & 0xFFF) + func - 1); } struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc) { /* Check if it is a PF or VF */ if (pcifunc & RVU_PFVF_FUNC_MASK) return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)]; else return &rvu->pf[rvu_get_pf(pcifunc)]; } static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc) { int pf, vf, nvfs; u64 cfg; pf = rvu_get_pf(pcifunc); if (pf >= rvu->hw->total_pfs) return false; if (!(pcifunc & RVU_PFVF_FUNC_MASK)) return true; /* Check if VF is within number of VFs attached to this PF */ vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); nvfs = (cfg >> 12) & 0xFF; if (vf >= nvfs) return false; return true; } bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr) { struct rvu_block *block; if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT) return false; block = &hw->block[blkaddr]; return block->implemented; } static void rvu_check_block_implemented(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; struct rvu_block *block; int blkid; u64 cfg; /* For each block check if 'implemented' bit is set */ for (blkid = 0; blkid < BLK_COUNT; blkid++) { block = &hw->block[blkid]; cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid)); if (cfg & BIT_ULL(11)) block->implemented = true; } } static void rvu_setup_rvum_blk_revid(struct rvu *rvu) { rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), RVU_BLK_RVUM_REVID); } static void rvu_clear_rvum_blk_revid(struct rvu *rvu) { rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00); } int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf) { int err; if (!block->implemented) return 0; rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12)); err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12), true); return err; } static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg) { struct rvu_block *block = &rvu->hw->block[blkaddr]; int err; if (!block->implemented) return; rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0)); err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true); if (err) { dev_err(rvu->dev, "HW block:%d reset timeout retrying again\n", blkaddr); while (rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true) == -EBUSY) ; } } static void rvu_reset_all_blocks(struct rvu *rvu) { /* Do a HW reset of all RVU blocks */ rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, 
NDC_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST); } static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block) { struct rvu_pfvf *pfvf; u64 cfg; int lf; for (lf = 0; lf < block->lf.max; lf++) { cfg = rvu_read64(rvu, block->addr, block->lfcfg_reg | (lf << block->lfshift)); if (!(cfg & BIT_ULL(63))) continue; /* Set this resource as being used */ __set_bit(lf, block->lf.bmap); /* Get, to whom this LF is attached */ pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF); rvu_update_rsrc_map(rvu, pfvf, block, (cfg >> 8) & 0xFFFF, lf, true); /* Set start MSIX vector for this LF within this PF/VF */ rvu_set_msix_offset(rvu, pfvf, block, lf); } } static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf) { int min_vecs; if (!vf) goto check_pf; if (!nvecs) { dev_warn(rvu->dev, "PF%d:VF%d is configured with zero msix vectors, %d\n", pf, vf - 1, nvecs); } return; check_pf: if (pf == 0) min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT; else min_vecs = RVU_PF_INT_VEC_CNT; if (!(nvecs < min_vecs)) return; dev_warn(rvu->dev, "PF%d is configured with too few vectors, %d, min is %d\n", pf, nvecs, min_vecs); } static int rvu_setup_msix_resources(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; int pf, vf, numvfs, hwvf, err; int nvecs, offset, max_msix; struct rvu_pfvf *pfvf; u64 cfg, phy_addr; dma_addr_t iova; for (pf = 0; pf < hw->total_pfs; pf++) { cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); /* If PF is not enabled, nothing to do */ if (!((cfg >> 20) & 0x01)) continue; rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); pfvf = &rvu->pf[pf]; /* Get num of MSIX vectors attached to this PF */ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf)); pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1; rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0); /* Alloc msix bitmap for this PF */ err = rvu_alloc_bitmap(&pfvf->msix); if (err) return err; /* Allocate memory for MSIX vector to RVU block LF mapping */ pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max, sizeof(u16), GFP_KERNEL); if (!pfvf->msix_lfmap) return -ENOMEM; /* For PF0 (AF) firmware will set msix vector offsets for * AF, block AF and PF0_INT vectors, so jump to VFs. */ if (!pf) goto setup_vfmsix; /* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors. * These are allocated on driver init and never freed, * so no need to set 'msix_lfmap' for these. */ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf)); nvecs = (cfg >> 12) & 0xFF; cfg &= ~0x7FFULL; offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf), cfg | offset); setup_vfmsix: /* Alloc msix bitmap for VFs */ for (vf = 0; vf < numvfs; vf++) { pfvf = &rvu->hwvf[hwvf + vf]; /* Get num of MSIX vectors attached to this VF */ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf)); pfvf->msix.max = (cfg & 0xFFF) + 1; rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1); /* Alloc msix bitmap for this VF */ err = rvu_alloc_bitmap(&pfvf->msix); if (err) return err; pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max, sizeof(u16), GFP_KERNEL); if (!pfvf->msix_lfmap) return -ENOMEM; /* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors. * These are allocated on driver init and never freed, * so no need to set 'msix_lfmap' for these. 
*/ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_HWVFX_INT_CFG(hwvf + vf)); nvecs = (cfg >> 12) & 0xFF; cfg &= ~0x7FFULL; offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_HWVFX_INT_CFG(hwvf + vf), cfg | offset); } } /* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence * create an IOMMU mapping for the physical address configured by * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA. */ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); max_msix = cfg & 0xFFFFF; if (rvu->fwdata && rvu->fwdata->msixtr_base) phy_addr = rvu->fwdata->msixtr_base; else phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE); iova = dma_map_resource(rvu->dev, phy_addr, max_msix * PCI_MSIX_ENTRY_SIZE, DMA_BIDIRECTIONAL, 0); if (dma_mapping_error(rvu->dev, iova)) return -ENOMEM; rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova); rvu->msix_base_iova = iova; rvu->msixtr_base_phy = phy_addr; return 0; } static void rvu_reset_msix(struct rvu *rvu) { /* Restore msixtr base register */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, rvu->msixtr_base_phy); } static void rvu_free_hw_resources(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; struct rvu_block *block; struct rvu_pfvf *pfvf; int id, max_msix; u64 cfg; rvu_npa_freemem(rvu); rvu_npc_freemem(rvu); rvu_nix_freemem(rvu); /* Free block LF bitmaps */ for (id = 0; id < BLK_COUNT; id++) { block = &hw->block[id]; kfree(block->lf.bmap); } /* Free MSIX bitmaps */ for (id = 0; id < hw->total_pfs; id++) { pfvf = &rvu->pf[id]; kfree(pfvf->msix.bmap); } for (id = 0; id < hw->total_vfs; id++) { pfvf = &rvu->hwvf[id]; kfree(pfvf->msix.bmap); } /* Unmap MSIX vector base IOVA mapping */ if (!rvu->msix_base_iova) return; cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); max_msix = cfg & 0xFFFFF; dma_unmap_resource(rvu->dev, rvu->msix_base_iova, max_msix * PCI_MSIX_ENTRY_SIZE, DMA_BIDIRECTIONAL, 0); rvu_reset_msix(rvu); mutex_destroy(&rvu->rsrc_lock); } static void rvu_setup_pfvf_macaddress(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; int pf, vf, numvfs, hwvf; struct rvu_pfvf *pfvf; u64 *mac; for (pf = 0; pf < hw->total_pfs; pf++) { /* For PF0(AF), Assign MAC address to only VFs (LBKVFs) */ if (!pf) goto lbkvf; if (!is_pf_cgxmapped(rvu, pf)) continue; /* Assign MAC address to PF */ pfvf = &rvu->pf[pf]; if (rvu->fwdata && pf < PF_MACNUM_MAX) { mac = &rvu->fwdata->pf_macs[pf]; if (*mac) u64_to_ether_addr(*mac, pfvf->mac_addr); else eth_random_addr(pfvf->mac_addr); } else { eth_random_addr(pfvf->mac_addr); } ether_addr_copy(pfvf->default_mac, pfvf->mac_addr); lbkvf: /* Assign MAC address to VFs*/ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); for (vf = 0; vf < numvfs; vf++, hwvf++) { pfvf = &rvu->hwvf[hwvf]; if (rvu->fwdata && hwvf < VF_MACNUM_MAX) { mac = &rvu->fwdata->vf_macs[hwvf]; if (*mac) u64_to_ether_addr(*mac, pfvf->mac_addr); else eth_random_addr(pfvf->mac_addr); } else { eth_random_addr(pfvf->mac_addr); } ether_addr_copy(pfvf->default_mac, pfvf->mac_addr); } } } static int rvu_fwdata_init(struct rvu *rvu) { u64 fwdbase; int err; /* Get firmware data base address */ err = cgx_get_fwdata_base(&fwdbase); if (err) goto fail; rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata)); if (!rvu->fwdata) goto fail; if (!is_rvu_fwdata_valid(rvu)) { dev_err(rvu->dev, "Mismatch in 'fwdata' struct btw kernel and firmware\n"); iounmap(rvu->fwdata); rvu->fwdata = NULL; return -EINVAL; } return 0; fail: dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n"); return -EIO; } static void 
rvu_fwdata_exit(struct rvu *rvu) { if (rvu->fwdata) iounmap(rvu->fwdata); } static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr) { struct rvu_hwinfo *hw = rvu->hw; struct rvu_block *block; int blkid; u64 cfg; /* Init NIX LF's bitmap */ block = &hw->block[blkaddr]; if (!block->implemented) return 0; blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1; cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); block->lf.max = cfg & 0xFFF; block->addr = blkaddr; block->type = BLKTYPE_NIX; block->lfshift = 8; block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG; block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid); block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid); block->lfcfg_reg = NIX_PRIV_LFX_CFG; block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG; block->lfreset_reg = NIX_AF_LF_RST; block->rvu = rvu; sprintf(block->name, "NIX%d", blkid); rvu->nix_blkaddr[blkid] = blkaddr; return rvu_alloc_bitmap(&block->lf); } static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr) { struct rvu_hwinfo *hw = rvu->hw; struct rvu_block *block; int blkid; u64 cfg; /* Init CPT LF's bitmap */ block = &hw->block[blkaddr]; if (!block->implemented) return 0; blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1; cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0); block->lf.max = cfg & 0xFF; block->addr = blkaddr; block->type = BLKTYPE_CPT; block->multislot = true; block->lfshift = 3; block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG; block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid); block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid); block->lfcfg_reg = CPT_PRIV_LFX_CFG; block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG; block->lfreset_reg = CPT_AF_LF_RST; block->rvu = rvu; sprintf(block->name, "CPT%d", blkid); return rvu_alloc_bitmap(&block->lf); } static void rvu_get_lbk_bufsize(struct rvu *rvu) { struct pci_dev *pdev = NULL; void __iomem *base; u64 lbk_const; pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK, pdev); if (!pdev) return; base = pci_ioremap_bar(pdev, 0); if (!base) goto err_put; lbk_const = readq(base + LBK_CONST); /* cache fifo size */ rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const); iounmap(base); err_put: pci_dev_put(pdev); } static int rvu_setup_hw_resources(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; struct rvu_block *block; int blkid, err; u64 cfg; /* Get HW supported max RVU PF & VF count */ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); hw->total_pfs = (cfg >> 32) & 0xFF; hw->total_vfs = (cfg >> 20) & 0xFFF; hw->max_vfs_per_pf = (cfg >> 40) & 0xFF; /* Init NPA LF's bitmap */ block = &hw->block[BLKADDR_NPA]; if (!block->implemented) goto nix; cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST); block->lf.max = (cfg >> 16) & 0xFFF; block->addr = BLKADDR_NPA; block->type = BLKTYPE_NPA; block->lfshift = 8; block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG; block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG; block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG; block->lfcfg_reg = NPA_PRIV_LFX_CFG; block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG; block->lfreset_reg = NPA_AF_LF_RST; block->rvu = rvu; sprintf(block->name, "NPA"); err = rvu_alloc_bitmap(&block->lf); if (err) { dev_err(rvu->dev, "%s: Failed to allocate NPA LF bitmap\n", __func__); return err; } nix: err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0); if (err) { dev_err(rvu->dev, "%s: Failed to allocate NIX0 LFs bitmap\n", __func__); return err; } err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1); if (err) { dev_err(rvu->dev, "%s: Failed to allocate NIX1 LFs bitmap\n", __func__); return err; } /* Init SSO group's bitmap */ block = 
&hw->block[BLKADDR_SSO]; if (!block->implemented) goto ssow; cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST); block->lf.max = cfg & 0xFFFF; block->addr = BLKADDR_SSO; block->type = BLKTYPE_SSO; block->multislot = true; block->lfshift = 3; block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG; block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG; block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG; block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG; block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG; block->lfreset_reg = SSO_AF_LF_HWGRP_RST; block->rvu = rvu; sprintf(block->name, "SSO GROUP"); err = rvu_alloc_bitmap(&block->lf); if (err) { dev_err(rvu->dev, "%s: Failed to allocate SSO LF bitmap\n", __func__); return err; } ssow: /* Init SSO workslot's bitmap */ block = &hw->block[BLKADDR_SSOW]; if (!block->implemented) goto tim; block->lf.max = (cfg >> 56) & 0xFF; block->addr = BLKADDR_SSOW; block->type = BLKTYPE_SSOW; block->multislot = true; block->lfshift = 3; block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG; block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG; block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG; block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG; block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG; block->lfreset_reg = SSOW_AF_LF_HWS_RST; block->rvu = rvu; sprintf(block->name, "SSOWS"); err = rvu_alloc_bitmap(&block->lf); if (err) { dev_err(rvu->dev, "%s: Failed to allocate SSOW LF bitmap\n", __func__); return err; } tim: /* Init TIM LF's bitmap */ block = &hw->block[BLKADDR_TIM]; if (!block->implemented) goto cpt; cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST); block->lf.max = cfg & 0xFFFF; block->addr = BLKADDR_TIM; block->type = BLKTYPE_TIM; block->multislot = true; block->lfshift = 3; block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG; block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG; block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG; block->lfcfg_reg = TIM_PRIV_LFX_CFG; block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG; block->lfreset_reg = TIM_AF_LF_RST; block->rvu = rvu; sprintf(block->name, "TIM"); err = rvu_alloc_bitmap(&block->lf); if (err) { dev_err(rvu->dev, "%s: Failed to allocate TIM LF bitmap\n", __func__); return err; } cpt: err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0); if (err) { dev_err(rvu->dev, "%s: Failed to allocate CPT0 LF bitmap\n", __func__); return err; } err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1); if (err) { dev_err(rvu->dev, "%s: Failed to allocate CPT1 LF bitmap\n", __func__); return err; } /* Allocate memory for PFVF data */ rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs, sizeof(struct rvu_pfvf), GFP_KERNEL); if (!rvu->pf) { dev_err(rvu->dev, "%s: Failed to allocate memory for PF's rvu_pfvf struct\n", __func__); return -ENOMEM; } rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs, sizeof(struct rvu_pfvf), GFP_KERNEL); if (!rvu->hwvf) { dev_err(rvu->dev, "%s: Failed to allocate memory for VF's rvu_pfvf struct\n", __func__); return -ENOMEM; } mutex_init(&rvu->rsrc_lock); rvu_fwdata_init(rvu); err = rvu_setup_msix_resources(rvu); if (err) { dev_err(rvu->dev, "%s: Failed to setup MSIX resources\n", __func__); return err; } for (blkid = 0; blkid < BLK_COUNT; blkid++) { block = &hw->block[blkid]; if (!block->lf.bmap) continue; /* Allocate memory for block LF/slot to pcifunc mapping info */ block->fn_map = devm_kcalloc(rvu->dev, block->lf.max, sizeof(u16), GFP_KERNEL); if (!block->fn_map) { err = -ENOMEM; goto msix_err; } /* Scan all blocks to check if low level firmware has * already provisioned any of the resources to a PF/VF. 
*/ rvu_scan_block(rvu, block); } err = rvu_set_channels_base(rvu); if (err) goto msix_err; err = rvu_npc_init(rvu); if (err) { dev_err(rvu->dev, "%s: Failed to initialize npc\n", __func__); goto npc_err; } err = rvu_cgx_init(rvu); if (err) { dev_err(rvu->dev, "%s: Failed to initialize cgx\n", __func__); goto cgx_err; } err = rvu_npc_exact_init(rvu); if (err) { dev_err(rvu->dev, "failed to initialize exact match table\n"); return err; } /* Assign MACs for CGX mapped functions */ rvu_setup_pfvf_macaddress(rvu); err = rvu_npa_init(rvu); if (err) { dev_err(rvu->dev, "%s: Failed to initialize npa\n", __func__); goto npa_err; } rvu_get_lbk_bufsize(rvu); err = rvu_nix_init(rvu); if (err) { dev_err(rvu->dev, "%s: Failed to initialize nix\n", __func__); goto nix_err; } err = rvu_sdp_init(rvu); if (err) { dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__); goto nix_err; } rvu_program_channels(rvu); err = rvu_mcs_init(rvu); if (err) { dev_err(rvu->dev, "%s: Failed to initialize mcs\n", __func__); goto nix_err; } err = rvu_cpt_init(rvu); if (err) { dev_err(rvu->dev, "%s: Failed to initialize cpt\n", __func__); goto mcs_err; } return 0; mcs_err: rvu_mcs_exit(rvu); nix_err: rvu_nix_freemem(rvu); npa_err: rvu_npa_freemem(rvu); cgx_err: rvu_cgx_exit(rvu); npc_err: rvu_npc_freemem(rvu); rvu_fwdata_exit(rvu); msix_err: rvu_reset_msix(rvu); return err; } /* NPA and NIX admin queue APIs */ void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq) { if (!aq) return; qmem_free(rvu->dev, aq->inst); qmem_free(rvu->dev, aq->res); devm_kfree(rvu->dev, aq); } int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue, int qsize, int inst_size, int res_size) { struct admin_queue *aq; int err; *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL); if (!*ad_queue) return -ENOMEM; aq = *ad_queue; /* Alloc memory for instructions i.e AQ */ err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size); if (err) { devm_kfree(rvu->dev, aq); return err; } /* Alloc memory for results */ err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size); if (err) { rvu_aq_free(rvu, aq); return err; } spin_lock_init(&aq->lock); return 0; } int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req, struct ready_msg_rsp *rsp) { if (rvu->fwdata) { rsp->rclk_freq = rvu->fwdata->rclk; rsp->sclk_freq = rvu->fwdata->sclk; } return 0; } /* Get current count of a RVU block's LF/slots * provisioned to a given RVU func. */ u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr) { switch (blkaddr) { case BLKADDR_NPA: return pfvf->npalf ? 1 : 0; case BLKADDR_NIX0: case BLKADDR_NIX1: return pfvf->nixlf ? 1 : 0; case BLKADDR_SSO: return pfvf->sso; case BLKADDR_SSOW: return pfvf->ssow; case BLKADDR_TIM: return pfvf->timlfs; case BLKADDR_CPT0: return pfvf->cptlfs; case BLKADDR_CPT1: return pfvf->cpt1_lfs; } return 0; } /* Return true if LFs of block type are attached to pcifunc */ static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype) { switch (blktype) { case BLKTYPE_NPA: return pfvf->npalf ? 1 : 0; case BLKTYPE_NIX: return pfvf->nixlf ? 
1 : 0; case BLKTYPE_SSO: return !!pfvf->sso; case BLKTYPE_SSOW: return !!pfvf->ssow; case BLKTYPE_TIM: return !!pfvf->timlfs; case BLKTYPE_CPT: return pfvf->cptlfs || pfvf->cpt1_lfs; } return false; } bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype) { struct rvu_pfvf *pfvf; if (!is_pf_func_valid(rvu, pcifunc)) return false; pfvf = rvu_get_pfvf(rvu, pcifunc); /* Check if this PFFUNC has a LF of type blktype attached */ if (!is_blktype_attached(pfvf, blktype)) return false; return true; } static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block, int pcifunc, int slot) { u64 val; val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13); rvu_write64(rvu, block->addr, block->lookup_reg, val); /* Wait for the lookup to finish */ /* TODO: put some timeout here */ while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13)) ; val = rvu_read64(rvu, block->addr, block->lookup_reg); /* Check LF valid bit */ if (!(val & (1ULL << 12))) return -1; return (val & 0xFFF); } int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc, u16 global_slot, u16 *slot_in_block) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); int numlfs, total_lfs = 0, nr_blocks = 0; int i, num_blkaddr[BLK_COUNT] = { 0 }; struct rvu_block *block; int blkaddr; u16 start_slot; if (!is_blktype_attached(pfvf, blktype)) return -ENODEV; /* Get all the block addresses from which LFs are attached to * the given pcifunc in num_blkaddr[]. */ for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) { block = &rvu->hw->block[blkaddr]; if (block->type != blktype) continue; if (!is_block_implemented(rvu->hw, blkaddr)) continue; numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr); if (numlfs) { total_lfs += numlfs; num_blkaddr[nr_blocks] = blkaddr; nr_blocks++; } } if (global_slot >= total_lfs) return -ENODEV; /* Based on the given global slot number retrieve the * correct block address out of all attached block * addresses and slot number in that block. 
*/ total_lfs = 0; blkaddr = -ENODEV; for (i = 0; i < nr_blocks; i++) { numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]); total_lfs += numlfs; if (global_slot < total_lfs) { blkaddr = num_blkaddr[i]; start_slot = total_lfs - numlfs; *slot_in_block = global_slot - start_slot; break; } } return blkaddr; } static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct rvu_hwinfo *hw = rvu->hw; struct rvu_block *block; int slot, lf, num_lfs; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc); if (blkaddr < 0) return; if (blktype == BLKTYPE_NIX) rvu_nix_reset_mac(pfvf, pcifunc); block = &hw->block[blkaddr]; num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr); if (!num_lfs) return; for (slot = 0; slot < num_lfs; slot++) { lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot); if (lf < 0) /* This should never happen */ continue; /* Disable the LF */ rvu_write64(rvu, blkaddr, block->lfcfg_reg | (lf << block->lfshift), 0x00ULL); /* Update SW maintained mapping info as well */ rvu_update_rsrc_map(rvu, pfvf, block, pcifunc, lf, false); /* Free the resource */ rvu_free_rsrc(&block->lf, lf); /* Clear MSIX vector offset for this LF */ rvu_clear_msix_offset(rvu, pfvf, block, lf); } } static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach, u16 pcifunc) { struct rvu_hwinfo *hw = rvu->hw; bool detach_all = true; struct rvu_block *block; int blkid; mutex_lock(&rvu->rsrc_lock); /* Check for partial resource detach */ if (detach && detach->partial) detach_all = false; /* Check for RVU block's LFs attached to this func, * if so, detach them. */ for (blkid = 0; blkid < BLK_COUNT; blkid++) { block = &hw->block[blkid]; if (!block->lf.bmap) continue; if (!detach_all && detach) { if (blkid == BLKADDR_NPA && !detach->npalf) continue; else if ((blkid == BLKADDR_NIX0) && !detach->nixlf) continue; else if ((blkid == BLKADDR_NIX1) && !detach->nixlf) continue; else if ((blkid == BLKADDR_SSO) && !detach->sso) continue; else if ((blkid == BLKADDR_SSOW) && !detach->ssow) continue; else if ((blkid == BLKADDR_TIM) && !detach->timlfs) continue; else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs) continue; else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs) continue; } rvu_detach_block(rvu, pcifunc, block->type); } mutex_unlock(&rvu->rsrc_lock); return 0; } int rvu_mbox_handler_detach_resources(struct rvu *rvu, struct rsrc_detach *detach, struct msg_rsp *rsp) { return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc); } int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); int blkaddr = BLKADDR_NIX0, vf; struct rvu_pfvf *pf; pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); /* All CGX mapped PFs are set with assigned NIX block during init */ if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { blkaddr = pf->nix_blkaddr; } else if (is_afvf(pcifunc)) { vf = pcifunc - 1; /* Assign NIX based on VF number. All even numbered VFs get * NIX0 and odd numbered gets NIX1 */ blkaddr = (vf & 1) ? 
BLKADDR_NIX1 : BLKADDR_NIX0; /* NIX1 is not present on all silicons */ if (!is_block_implemented(rvu->hw, BLKADDR_NIX1)) blkaddr = BLKADDR_NIX0; } /* if SDP1 then the blkaddr is NIX1 */ if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1) blkaddr = BLKADDR_NIX1; switch (blkaddr) { case BLKADDR_NIX1: pfvf->nix_blkaddr = BLKADDR_NIX1; pfvf->nix_rx_intf = NIX_INTFX_RX(1); pfvf->nix_tx_intf = NIX_INTFX_TX(1); break; case BLKADDR_NIX0: default: pfvf->nix_blkaddr = BLKADDR_NIX0; pfvf->nix_rx_intf = NIX_INTFX_RX(0); pfvf->nix_tx_intf = NIX_INTFX_TX(0); break; } return pfvf->nix_blkaddr; } static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc, struct rsrc_attach *attach) { int blkaddr; switch (blktype) { case BLKTYPE_NIX: blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc); break; case BLKTYPE_CPT: if (attach->hdr.ver < RVU_MULTI_BLK_VER) return rvu_get_blkaddr(rvu, blktype, 0); blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr : BLKADDR_CPT0; if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1) return -ENODEV; break; default: return rvu_get_blkaddr(rvu, blktype, 0); } if (is_block_implemented(rvu->hw, blkaddr)) return blkaddr; return -ENODEV; } static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype, int num_lfs, struct rsrc_attach *attach) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct rvu_hwinfo *hw = rvu->hw; struct rvu_block *block; int slot, lf; int blkaddr; u64 cfg; if (!num_lfs) return; blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach); if (blkaddr < 0) return; block = &hw->block[blkaddr]; if (!block->lf.bmap) return; for (slot = 0; slot < num_lfs; slot++) { /* Allocate the resource */ lf = rvu_alloc_rsrc(&block->lf); if (lf < 0) return; cfg = (1ULL << 63) | (pcifunc << 8) | slot; rvu_write64(rvu, blkaddr, block->lfcfg_reg | (lf << block->lfshift), cfg); rvu_update_rsrc_map(rvu, pfvf, block, pcifunc, lf, true); /* Set start MSIX vector for this LF within this PF/VF */ rvu_set_msix_offset(rvu, pfvf, block, lf); } } static int rvu_check_rsrc_availability(struct rvu *rvu, struct rsrc_attach *req, u16 pcifunc) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); int free_lfs, mappedlfs, blkaddr; struct rvu_hwinfo *hw = rvu->hw; struct rvu_block *block; /* Only one NPA LF can be attached */ if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) { block = &hw->block[BLKADDR_NPA]; free_lfs = rvu_rsrc_free_count(&block->lf); if (!free_lfs) goto fail; } else if (req->npalf) { dev_err(&rvu->pdev->dev, "Func 0x%x: Invalid req, already has NPA\n", pcifunc); return -EINVAL; } /* Only one NIX LF can be attached */ if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) { blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX, pcifunc, req); if (blkaddr < 0) return blkaddr; block = &hw->block[blkaddr]; free_lfs = rvu_rsrc_free_count(&block->lf); if (!free_lfs) goto fail; } else if (req->nixlf) { dev_err(&rvu->pdev->dev, "Func 0x%x: Invalid req, already has NIX\n", pcifunc); return -EINVAL; } if (req->sso) { block = &hw->block[BLKADDR_SSO]; /* Is request within limits ? 
*/ if (req->sso > block->lf.max) { dev_err(&rvu->pdev->dev, "Func 0x%x: Invalid SSO req, %d > max %d\n", pcifunc, req->sso, block->lf.max); return -EINVAL; } mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); free_lfs = rvu_rsrc_free_count(&block->lf); /* Check if additional resources are available */ if (req->sso > mappedlfs && ((req->sso - mappedlfs) > free_lfs)) goto fail; } if (req->ssow) { block = &hw->block[BLKADDR_SSOW]; if (req->ssow > block->lf.max) { dev_err(&rvu->pdev->dev, "Func 0x%x: Invalid SSOW req, %d > max %d\n", pcifunc, req->sso, block->lf.max); return -EINVAL; } mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); free_lfs = rvu_rsrc_free_count(&block->lf); if (req->ssow > mappedlfs && ((req->ssow - mappedlfs) > free_lfs)) goto fail; } if (req->timlfs) { block = &hw->block[BLKADDR_TIM]; if (req->timlfs > block->lf.max) { dev_err(&rvu->pdev->dev, "Func 0x%x: Invalid TIMLF req, %d > max %d\n", pcifunc, req->timlfs, block->lf.max); return -EINVAL; } mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); free_lfs = rvu_rsrc_free_count(&block->lf); if (req->timlfs > mappedlfs && ((req->timlfs - mappedlfs) > free_lfs)) goto fail; } if (req->cptlfs) { blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT, pcifunc, req); if (blkaddr < 0) return blkaddr; block = &hw->block[blkaddr]; if (req->cptlfs > block->lf.max) { dev_err(&rvu->pdev->dev, "Func 0x%x: Invalid CPTLF req, %d > max %d\n", pcifunc, req->cptlfs, block->lf.max); return -EINVAL; } mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); free_lfs = rvu_rsrc_free_count(&block->lf); if (req->cptlfs > mappedlfs && ((req->cptlfs - mappedlfs) > free_lfs)) goto fail; } return 0; fail: dev_info(rvu->dev, "Request for %s failed\n", block->name); return -ENOSPC; } static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype, struct rsrc_attach *attach) { int blkaddr, num_lfs; blkaddr = rvu_get_attach_blkaddr(rvu, blktype, attach->hdr.pcifunc, attach); if (blkaddr < 0) return false; num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc), blkaddr); /* Requester already has LFs from given block ? */ return !!num_lfs; } int rvu_mbox_handler_attach_resources(struct rvu *rvu, struct rsrc_attach *attach, struct msg_rsp *rsp) { u16 pcifunc = attach->hdr.pcifunc; int err; /* If first request, detach all existing attached resources */ if (!attach->modify) rvu_detach_rsrcs(rvu, NULL, pcifunc); mutex_lock(&rvu->rsrc_lock); /* Check if the request can be accommodated */ err = rvu_check_rsrc_availability(rvu, attach, pcifunc); if (err) goto exit; /* Now attach the requested resources */ if (attach->npalf) rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach); if (attach->nixlf) rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach); if (attach->sso) { /* RVU func doesn't know which exact LF or slot is attached * to it, it always sees as slot 0,1,2. So for a 'modify' * request, simply detach all existing attached LFs/slots * and attach a fresh. 
*/ if (attach->modify) rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO); rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso, attach); } if (attach->ssow) { if (attach->modify) rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW); rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow, attach); } if (attach->timlfs) { if (attach->modify) rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM); rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs, attach); } if (attach->cptlfs) { if (attach->modify && rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach)) rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT); rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs, attach); } exit: mutex_unlock(&rvu->rsrc_lock); return err; } static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, int blkaddr, int lf) { u16 vec; if (lf < 0) return MSIX_VECTOR_INVALID; for (vec = 0; vec < pfvf->msix.max; vec++) { if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf)) return vec; } return MSIX_VECTOR_INVALID; } static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, struct rvu_block *block, int lf) { u16 nvecs, vec, offset; u64 cfg; cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg | (lf << block->lfshift)); nvecs = (cfg >> 12) & 0xFF; /* Check and alloc MSIX vectors, must be contiguous */ if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs)) return; offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); /* Config MSIX offset in LF */ rvu_write64(rvu, block->addr, block->msixcfg_reg | (lf << block->lfshift), (cfg & ~0x7FFULL) | offset); /* Update the bitmap as well */ for (vec = 0; vec < nvecs; vec++) pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf); } static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, struct rvu_block *block, int lf) { u16 nvecs, vec, offset; u64 cfg; cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg | (lf << block->lfshift)); nvecs = (cfg >> 12) & 0xFF; /* Clear MSIX offset in LF */ rvu_write64(rvu, block->addr, block->msixcfg_reg | (lf << block->lfshift), cfg & ~0x7FFULL); offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf); /* Update the mapping */ for (vec = 0; vec < nvecs; vec++) pfvf->msix_lfmap[offset + vec] = 0; /* Free the same in MSIX bitmap */ rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset); } int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req, struct msix_offset_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; int lf, slot, blkaddr; pfvf = rvu_get_pfvf(rvu, pcifunc); if (!pfvf->msix.bmap) return 0; /* Set MSIX offsets for each block's LFs attached to this PF/VF */ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0); rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf); /* Get BLKADDR from which LFs are attached to pcifunc */ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) { rsp->nix_msixoff = MSIX_VECTOR_INVALID; } else { lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf); } rsp->sso = pfvf->sso; for (slot = 0; slot < rsp->sso; slot++) { lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot); rsp->sso_msixoff[slot] = rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf); } rsp->ssow = pfvf->ssow; for (slot = 0; slot < rsp->ssow; slot++) { lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot); rsp->ssow_msixoff[slot] = rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf); } rsp->timlfs = pfvf->timlfs; for (slot = 0; slot < rsp->timlfs; slot++) { lf = 
rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot); rsp->timlf_msixoff[slot] = rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf); } rsp->cptlfs = pfvf->cptlfs; for (slot = 0; slot < rsp->cptlfs; slot++) { lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot); rsp->cptlf_msixoff[slot] = rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf); } rsp->cpt1_lfs = pfvf->cpt1_lfs; for (slot = 0; slot < rsp->cpt1_lfs; slot++) { lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot); rsp->cpt1_lf_msixoff[slot] = rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf); } return 0; } int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req, struct free_rsrcs_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; struct rvu_block *block; struct nix_txsch *txsch; struct nix_hw *nix_hw; mutex_lock(&rvu->rsrc_lock); block = &hw->block[BLKADDR_NPA]; rsp->npa = rvu_rsrc_free_count(&block->lf); block = &hw->block[BLKADDR_NIX0]; rsp->nix = rvu_rsrc_free_count(&block->lf); block = &hw->block[BLKADDR_NIX1]; rsp->nix1 = rvu_rsrc_free_count(&block->lf); block = &hw->block[BLKADDR_SSO]; rsp->sso = rvu_rsrc_free_count(&block->lf); block = &hw->block[BLKADDR_SSOW]; rsp->ssow = rvu_rsrc_free_count(&block->lf); block = &hw->block[BLKADDR_TIM]; rsp->tim = rvu_rsrc_free_count(&block->lf); block = &hw->block[BLKADDR_CPT0]; rsp->cpt = rvu_rsrc_free_count(&block->lf); block = &hw->block[BLKADDR_CPT1]; rsp->cpt1 = rvu_rsrc_free_count(&block->lf); if (rvu->hw->cap.nix_fixed_txschq_mapping) { rsp->schq[NIX_TXSCH_LVL_SMQ] = 1; rsp->schq[NIX_TXSCH_LVL_TL4] = 1; rsp->schq[NIX_TXSCH_LVL_TL3] = 1; rsp->schq[NIX_TXSCH_LVL_TL2] = 1; /* NIX1 */ if (!is_block_implemented(rvu->hw, BLKADDR_NIX1)) goto out; rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1; rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1; rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1; rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1; } else { nix_hw = get_nix_hw(hw, BLKADDR_NIX0); txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; rsp->schq[NIX_TXSCH_LVL_SMQ] = rvu_rsrc_free_count(&txsch->schq); txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4]; rsp->schq[NIX_TXSCH_LVL_TL4] = rvu_rsrc_free_count(&txsch->schq); txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3]; rsp->schq[NIX_TXSCH_LVL_TL3] = rvu_rsrc_free_count(&txsch->schq); txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2]; rsp->schq[NIX_TXSCH_LVL_TL2] = rvu_rsrc_free_count(&txsch->schq); if (!is_block_implemented(rvu->hw, BLKADDR_NIX1)) goto out; nix_hw = get_nix_hw(hw, BLKADDR_NIX1); txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = rvu_rsrc_free_count(&txsch->schq); txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4]; rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = rvu_rsrc_free_count(&txsch->schq); txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3]; rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = rvu_rsrc_free_count(&txsch->schq); txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2]; rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = rvu_rsrc_free_count(&txsch->schq); } rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1; out: rsp->schq[NIX_TXSCH_LVL_TL1] = 1; mutex_unlock(&rvu->rsrc_lock); return 0; } int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; u16 vf, numvfs; u64 cfg; vf = pcifunc & RVU_PFVF_FUNC_MASK; cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc))); numvfs = (cfg >> 12) & 0xFF; if (vf && vf <= numvfs) __rvu_flr_handler(rvu, pcifunc); else return RVU_INVALID_VF_ID; return 0; } int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req, struct get_hw_cap_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; 
rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping; rsp->nix_shaping = hw->cap.nix_shaping; rsp->npc_hash_extract = hw->cap.npc_hash_extract; return 0; } int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req, struct msg_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; int blkaddr, nixlf; u16 target; /* Only PF can add VF permissions */ if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc)) return -EOPNOTSUPP; target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1); pfvf = rvu_get_pfvf(rvu, target); if (req->flags & RESET_VF_PERM) { pfvf->flags &= RVU_CLEAR_VF_PERM; } else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^ (req->flags & VF_TRUSTED)) { change_bit(PF_SET_VF_TRUSTED, &pfvf->flags); /* disable multicast and promisc entries */ if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) { blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target); if (blkaddr < 0) return 0; nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], target, 0); if (nixlf < 0) return 0; npc_enadis_default_mce_entry(rvu, target, nixlf, NIXLF_ALLMULTI_ENTRY, false); npc_enadis_default_mce_entry(rvu, target, nixlf, NIXLF_PROMISC_ENTRY, false); } } return 0; } static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid, struct mbox_msghdr *req) { struct rvu *rvu = pci_get_drvdata(mbox->pdev); /* Check if valid, if not reply with a invalid msg */ if (req->sig != OTX2_MBOX_REQ_SIG) goto bad_message; switch (req->id) { #define M(_name, _id, _fn_name, _req_type, _rsp_type) \ case _id: { \ struct _rsp_type *rsp; \ int err; \ \ rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \ mbox, devid, \ sizeof(struct _rsp_type)); \ /* some handlers should complete even if reply */ \ /* could not be allocated */ \ if (!rsp && \ _id != MBOX_MSG_DETACH_RESOURCES && \ _id != MBOX_MSG_NIX_TXSCH_FREE && \ _id != MBOX_MSG_VF_FLR) \ return -ENOMEM; \ if (rsp) { \ rsp->hdr.id = _id; \ rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \ rsp->hdr.pcifunc = req->pcifunc; \ rsp->hdr.rc = 0; \ } \ \ err = rvu_mbox_handler_ ## _fn_name(rvu, \ (struct _req_type *)req, \ rsp); \ if (rsp && err) \ rsp->hdr.rc = err; \ \ trace_otx2_msg_process(mbox->pdev, _id, err); \ return rsp ? 
err : -ENOMEM; \ } MBOX_MESSAGES #undef M bad_message: default: otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id); return -ENODEV; } } static void __rvu_mbox_handler(struct rvu_work *mwork, int type) { struct rvu *rvu = mwork->rvu; int offset, err, id, devid; struct otx2_mbox_dev *mdev; struct mbox_hdr *req_hdr; struct mbox_msghdr *msg; struct mbox_wq_info *mw; struct otx2_mbox *mbox; switch (type) { case TYPE_AFPF: mw = &rvu->afpf_wq_info; break; case TYPE_AFVF: mw = &rvu->afvf_wq_info; break; default: return; } devid = mwork - mw->mbox_wrk; mbox = &mw->mbox; mdev = &mbox->dev[devid]; /* Process received mbox messages */ req_hdr = mdev->mbase + mbox->rx_start; if (mw->mbox_wrk[devid].num_msgs == 0) return; offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN); for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) { msg = mdev->mbase + offset; /* Set which PF/VF sent this message based on mbox IRQ */ switch (type) { case TYPE_AFPF: msg->pcifunc &= ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT); msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT); break; case TYPE_AFVF: msg->pcifunc &= ~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT); msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1; break; } err = rvu_process_mbox_msg(mbox, devid, msg); if (!err) { offset = mbox->rx_start + msg->next_msgoff; continue; } if (msg->pcifunc & RVU_PFVF_FUNC_MASK) dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n", err, otx2_mbox_id2name(msg->id), msg->id, rvu_get_pf(msg->pcifunc), (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1); else dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n", err, otx2_mbox_id2name(msg->id), msg->id, devid); } mw->mbox_wrk[devid].num_msgs = 0; /* Send mbox responses to VF/PF */ otx2_mbox_msg_send(mbox, devid); } static inline void rvu_afpf_mbox_handler(struct work_struct *work) { struct rvu_work *mwork = container_of(work, struct rvu_work, work); __rvu_mbox_handler(mwork, TYPE_AFPF); } static inline void rvu_afvf_mbox_handler(struct work_struct *work) { struct rvu_work *mwork = container_of(work, struct rvu_work, work); __rvu_mbox_handler(mwork, TYPE_AFVF); } static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type) { struct rvu *rvu = mwork->rvu; struct otx2_mbox_dev *mdev; struct mbox_hdr *rsp_hdr; struct mbox_msghdr *msg; struct mbox_wq_info *mw; struct otx2_mbox *mbox; int offset, id, devid; switch (type) { case TYPE_AFPF: mw = &rvu->afpf_wq_info; break; case TYPE_AFVF: mw = &rvu->afvf_wq_info; break; default: return; } devid = mwork - mw->mbox_wrk_up; mbox = &mw->mbox_up; mdev = &mbox->dev[devid]; rsp_hdr = mdev->mbase + mbox->rx_start; if (mw->mbox_wrk_up[devid].up_num_msgs == 0) { dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n"); return; } offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) { msg = mdev->mbase + offset; if (msg->id >= MBOX_MSG_MAX) { dev_err(rvu->dev, "Mbox msg with unknown ID 0x%x\n", msg->id); goto end; } if (msg->sig != OTX2_MBOX_RSP_SIG) { dev_err(rvu->dev, "Mbox msg with wrong signature %x, ID 0x%x\n", msg->sig, msg->id); goto end; } switch (msg->id) { case MBOX_MSG_CGX_LINK_EVENT: break; default: if (msg->rc) dev_err(rvu->dev, "Mbox msg response has err %d, ID 0x%x\n", msg->rc, msg->id); break; } end: offset = mbox->rx_start + msg->next_msgoff; mdev->msgs_acked++; } mw->mbox_wrk_up[devid].up_num_msgs = 0; otx2_mbox_reset(mbox, devid); } static inline void rvu_afpf_mbox_up_handler(struct work_struct *work) { struct 
rvu_work *mwork = container_of(work, struct rvu_work, work); __rvu_mbox_up_handler(mwork, TYPE_AFPF); } static inline void rvu_afvf_mbox_up_handler(struct work_struct *work) { struct rvu_work *mwork = container_of(work, struct rvu_work, work); __rvu_mbox_up_handler(mwork, TYPE_AFVF); } static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, int num, int type, unsigned long *pf_bmap) { struct rvu_hwinfo *hw = rvu->hw; int region; u64 bar4; /* For cn10k platform VF mailbox regions of a PF follows after the * PF <-> AF mailbox region. Whereas for Octeontx2 it is read from * RVU_PF_VF_BAR4_ADDR register. */ if (type == TYPE_AFVF) { for (region = 0; region < num; region++) { if (!test_bit(region, pf_bmap)) continue; if (hw->cap.per_pf_mbox_regs) { bar4 = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFX_BAR4_ADDR(0)) + MBOX_SIZE; bar4 += region * MBOX_SIZE; } else { bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR); bar4 += region * MBOX_SIZE; } mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE); if (!mbox_addr[region]) goto error; } return 0; } /* For cn10k platform AF <-> PF mailbox region of a PF is read from per * PF registers. Whereas for Octeontx2 it is read from * RVU_AF_PF_BAR4_ADDR register. */ for (region = 0; region < num; region++) { if (!test_bit(region, pf_bmap)) continue; if (hw->cap.per_pf_mbox_regs) { bar4 = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFX_BAR4_ADDR(region)); } else { bar4 = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR); bar4 += region * MBOX_SIZE; } mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE); if (!mbox_addr[region]) goto error; } return 0; error: while (region--) iounmap((void __iomem *)mbox_addr[region]); return -ENOMEM; } static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, int type, int num, void (mbox_handler)(struct work_struct *), void (mbox_up_handler)(struct work_struct *)) { int err = -EINVAL, i, dir, dir_up; void __iomem *reg_base; struct rvu_work *mwork; unsigned long *pf_bmap; void **mbox_regions; const char *name; u64 cfg; pf_bmap = bitmap_zalloc(num, GFP_KERNEL); if (!pf_bmap) return -ENOMEM; /* RVU VFs */ if (type == TYPE_AFVF) bitmap_set(pf_bmap, 0, num); if (type == TYPE_AFPF) { /* Mark enabled PFs in bitmap */ for (i = 0; i < num; i++) { cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(i)); if (cfg & BIT_ULL(20)) set_bit(i, pf_bmap); } } mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL); if (!mbox_regions) { err = -ENOMEM; goto free_bitmap; } switch (type) { case TYPE_AFPF: name = "rvu_afpf_mailbox"; dir = MBOX_DIR_AFPF; dir_up = MBOX_DIR_AFPF_UP; reg_base = rvu->afreg_base; err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF, pf_bmap); if (err) goto free_regions; break; case TYPE_AFVF: name = "rvu_afvf_mailbox"; dir = MBOX_DIR_PFVF; dir_up = MBOX_DIR_PFVF_UP; reg_base = rvu->pfreg_base; err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF, pf_bmap); if (err) goto free_regions; break; default: goto free_regions; } mw->mbox_wq = alloc_workqueue(name, WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, num); if (!mw->mbox_wq) { err = -ENOMEM; goto unmap_regions; } mw->mbox_wrk = devm_kcalloc(rvu->dev, num, sizeof(struct rvu_work), GFP_KERNEL); if (!mw->mbox_wrk) { err = -ENOMEM; goto exit; } mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num, sizeof(struct rvu_work), GFP_KERNEL); if (!mw->mbox_wrk_up) { err = -ENOMEM; goto exit; } err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev, reg_base, dir, num, pf_bmap); if (err) goto exit; err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, 
rvu->pdev, reg_base, dir_up, num, pf_bmap); if (err) goto exit; for (i = 0; i < num; i++) { if (!test_bit(i, pf_bmap)) continue; mwork = &mw->mbox_wrk[i]; mwork->rvu = rvu; INIT_WORK(&mwork->work, mbox_handler); mwork = &mw->mbox_wrk_up[i]; mwork->rvu = rvu; INIT_WORK(&mwork->work, mbox_up_handler); } goto free_regions; exit: destroy_workqueue(mw->mbox_wq); unmap_regions: while (num--) iounmap((void __iomem *)mbox_regions[num]); free_regions: kfree(mbox_regions); free_bitmap: bitmap_free(pf_bmap); return err; } static void rvu_mbox_destroy(struct mbox_wq_info *mw) { struct otx2_mbox *mbox = &mw->mbox; struct otx2_mbox_dev *mdev; int devid; if (mw->mbox_wq) { destroy_workqueue(mw->mbox_wq); mw->mbox_wq = NULL; } for (devid = 0; devid < mbox->ndevs; devid++) { mdev = &mbox->dev[devid]; if (mdev->hwbase) iounmap((void __iomem *)mdev->hwbase); } otx2_mbox_destroy(&mw->mbox); otx2_mbox_destroy(&mw->mbox_up); } static void rvu_queue_work(struct mbox_wq_info *mw, int first, int mdevs, u64 intr) { struct otx2_mbox_dev *mdev; struct otx2_mbox *mbox; struct mbox_hdr *hdr; int i; for (i = first; i < mdevs; i++) { /* start from 0 */ if (!(intr & BIT_ULL(i - first))) continue; mbox = &mw->mbox; mdev = &mbox->dev[i]; hdr = mdev->mbase + mbox->rx_start; /*The hdr->num_msgs is set to zero immediately in the interrupt * handler to ensure that it holds a correct value next time * when the interrupt handler is called. * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler * pf>mbox.up_num_msgs holds the data for use in * pfaf_mbox_up_handler. */ if (hdr->num_msgs) { mw->mbox_wrk[i].num_msgs = hdr->num_msgs; hdr->num_msgs = 0; queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work); } mbox = &mw->mbox_up; mdev = &mbox->dev[i]; hdr = mdev->mbase + mbox->rx_start; if (hdr->num_msgs) { mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs; hdr->num_msgs = 0; queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work); } } } static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) { struct rvu *rvu = (struct rvu *)rvu_irq; int vfs = rvu->vfs; u64 intr; intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT); /* Clear interrupts */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr); if (intr) trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr); /* Sync with mbox memory region */ rmb(); rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr); /* Handle VF interrupts */ if (vfs > 64) { intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1)); rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr); rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr); vfs -= 64; } intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0)); rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr); if (intr) trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr); rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr); return IRQ_HANDLED; } static void rvu_enable_mbox_intr(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; /* Clear spurious irqs, if any */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs)); /* Enable mailbox interrupt for all PFs except PF0 i.e AF itself */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S, INTR_MASK(hw->total_pfs) & ~1ULL); } static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr) { struct rvu_block *block; int slot, lf, num_lfs; int err; block = &rvu->hw->block[blkaddr]; num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc), block->addr); if (!num_lfs) return; for (slot = 0; slot < num_lfs; slot++) { lf = rvu_get_lf(rvu, block, pcifunc, slot); if (lf < 0) 
continue; /* Cleanup LF and reset it */ if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1) rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf); else if (block->addr == BLKADDR_NPA) rvu_npa_lf_teardown(rvu, pcifunc, lf); else if ((block->addr == BLKADDR_CPT0) || (block->addr == BLKADDR_CPT1)) rvu_cpt_lf_teardown(rvu, pcifunc, block->addr, lf, slot); err = rvu_lf_reset(rvu, block, lf); if (err) { dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n", block->addr, lf); } } } static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) { if (rvu_npc_exact_has_match_table(rvu)) rvu_npc_exact_reset(rvu, pcifunc); mutex_lock(&rvu->flr_lock); /* Reset order should reflect inter-block dependencies: * 1. Reset any packet/work sources (NIX, CPT, TIM) * 2. Flush and reset SSO/SSOW * 3. Cleanup pools (NPA) */ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA); rvu_reset_lmt_map_tbl(rvu, pcifunc); rvu_detach_rsrcs(rvu, NULL, pcifunc); /* In scenarios where PF/VF drivers detach NIXLF without freeing MCAM * entries, check and free the MCAM entries explicitly to avoid leak. * Since LF is detached use LF number as -1. */ rvu_npc_free_mcam_entries(rvu, pcifunc, -1); rvu_mac_reset(rvu, pcifunc); mutex_unlock(&rvu->flr_lock); } static void rvu_afvf_flr_handler(struct rvu *rvu, int vf) { int reg = 0; /* pcifunc = 0(PF0) | (vf + 1) */ __rvu_flr_handler(rvu, vf + 1); if (vf >= 64) { reg = 1; vf = vf - 64; } /* Signal FLR finish and enable IRQ */ rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf)); rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf)); } static void rvu_flr_handler(struct work_struct *work) { struct rvu_work *flrwork = container_of(work, struct rvu_work, work); struct rvu *rvu = flrwork->rvu; u16 pcifunc, numvfs, vf; u64 cfg; int pf; pf = flrwork - rvu->flr_wrk; if (pf >= rvu->hw->total_pfs) { rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs); return; } cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); numvfs = (cfg >> 12) & 0xFF; pcifunc = pf << RVU_PFVF_PF_SHIFT; for (vf = 0; vf < numvfs; vf++) __rvu_flr_handler(rvu, (pcifunc | (vf + 1))); __rvu_flr_handler(rvu, pcifunc); /* Signal FLR finish */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf)); /* Enable interrupt */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf)); } static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs) { int dev, vf, reg = 0; u64 intr; if (start_vf >= 64) reg = 1; intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg)); if (!intr) return; for (vf = 0; vf < numvfs; vf++) { if (!(intr & BIT_ULL(vf))) continue; /* Clear and disable the interrupt */ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf)); rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf)); dev = vf + start_vf + rvu->hw->total_pfs; queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work); } } static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq) { struct rvu *rvu = (struct rvu *)rvu_irq; u64 intr; u8 pf; intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT); if (!intr) goto afvf_flr; for (pf = 0; pf < rvu->hw->total_pfs; pf++) { if (intr & (1ULL << pf)) { /* clear interrupt */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT, BIT_ULL(pf)); /* 
Disable the interrupt */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C, BIT_ULL(pf)); /* PF is already dead do only AF related operations */ queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work); } } afvf_flr: rvu_afvf_queue_flr_work(rvu, 0, 64); if (rvu->vfs > 64) rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64); return IRQ_HANDLED; } static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr) { int vf; /* Nothing to be done here other than clearing the * TRPEND bit. */ for (vf = 0; vf < 64; vf++) { if (intr & (1ULL << vf)) { /* clear the trpend due to ME(master enable) */ rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf)); /* clear interrupt */ rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf)); } } } /* Handles ME interrupts from VFs of AF */ static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq) { struct rvu *rvu = (struct rvu *)rvu_irq; int vfset; u64 intr; intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT); for (vfset = 0; vfset <= 1; vfset++) { intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset)); if (intr) rvu_me_handle_vfset(rvu, vfset, intr); } return IRQ_HANDLED; } /* Handles ME interrupts from PFs */ static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq) { struct rvu *rvu = (struct rvu *)rvu_irq; u64 intr; u8 pf; intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT); /* Nothing to be done here other than clearing the * TRPEND bit. */ for (pf = 0; pf < rvu->hw->total_pfs; pf++) { if (intr & (1ULL << pf)) { /* clear the trpend due to ME(master enable) */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf)); /* clear interrupt */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT, BIT_ULL(pf)); } } return IRQ_HANDLED; } static void rvu_unregister_interrupts(struct rvu *rvu) { int irq; rvu_cpt_unregister_interrupts(rvu); /* Disable the Mbox interrupt */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C, INTR_MASK(rvu->hw->total_pfs) & ~1ULL); /* Disable the PF FLR interrupt */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C, INTR_MASK(rvu->hw->total_pfs) & ~1ULL); /* Disable the PF ME interrupt */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C, INTR_MASK(rvu->hw->total_pfs) & ~1ULL); for (irq = 0; irq < rvu->num_vec; irq++) { if (rvu->irq_allocated[irq]) { free_irq(pci_irq_vector(rvu->pdev, irq), rvu); rvu->irq_allocated[irq] = false; } } pci_free_irq_vectors(rvu->pdev); rvu->num_vec = 0; } static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu) { struct rvu_pfvf *pfvf = &rvu->pf[0]; int offset; pfvf = &rvu->pf[0]; offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff; /* Make sure there are enough MSIX vectors configured so that * VF interrupts can be handled. Offset equal to zero means * that PF vectors are not configured and overlapping AF vectors. 
*/ return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) && offset; } static int rvu_register_interrupts(struct rvu *rvu) { int ret, offset, pf_vec_start; rvu->num_vec = pci_msix_vec_count(rvu->pdev); rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec, NAME_SIZE, GFP_KERNEL); if (!rvu->irq_name) return -ENOMEM; rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec, sizeof(bool), GFP_KERNEL); if (!rvu->irq_allocated) return -ENOMEM; /* Enable MSI-X */ ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec, rvu->num_vec, PCI_IRQ_MSIX); if (ret < 0) { dev_err(rvu->dev, "RVUAF: Request for %d msix vectors failed, ret %d\n", rvu->num_vec, ret); return ret; } /* Register mailbox interrupt handler */ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox"); ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX), rvu_mbox_intr_handler, 0, &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu); if (ret) { dev_err(rvu->dev, "RVUAF: IRQ registration failed for mbox irq\n"); goto fail; } rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true; /* Enable mailbox interrupts from all PFs */ rvu_enable_mbox_intr(rvu); /* Register FLR interrupt handler */ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE], "RVUAF FLR"); ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR), rvu_flr_intr_handler, 0, &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE], rvu); if (ret) { dev_err(rvu->dev, "RVUAF: IRQ registration failed for FLR\n"); goto fail; } rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true; /* Enable FLR interrupt for all PFs*/ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs)); rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, INTR_MASK(rvu->hw->total_pfs) & ~1ULL); /* Register ME interrupt handler */ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE], "RVUAF ME"); ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME), rvu_me_pf_intr_handler, 0, &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE], rvu); if (ret) { dev_err(rvu->dev, "RVUAF: IRQ registration failed for ME\n"); } rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true; /* Clear TRPEND bit for all PF */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs)); /* Enable ME interrupt for all PFs*/ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs)); rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S, INTR_MASK(rvu->hw->total_pfs) & ~1ULL); if (!rvu_afvf_msix_vectors_num_ok(rvu)) return 0; /* Get PF MSIX vectors offset. */ pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff; /* Register MBOX0 interrupt. */ offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0; sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0"); ret = request_irq(pci_irq_vector(rvu->pdev, offset), rvu_mbox_intr_handler, 0, &rvu->irq_name[offset * NAME_SIZE], rvu); if (ret) dev_err(rvu->dev, "RVUAF: IRQ registration failed for Mbox0\n"); rvu->irq_allocated[offset] = true; /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so * simply increment current offset by 1. 
*/ offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1; sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1"); ret = request_irq(pci_irq_vector(rvu->pdev, offset), rvu_mbox_intr_handler, 0, &rvu->irq_name[offset * NAME_SIZE], rvu); if (ret) dev_err(rvu->dev, "RVUAF: IRQ registration failed for Mbox1\n"); rvu->irq_allocated[offset] = true; /* Register FLR interrupt handler for AF's VFs */ offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0; sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0"); ret = request_irq(pci_irq_vector(rvu->pdev, offset), rvu_flr_intr_handler, 0, &rvu->irq_name[offset * NAME_SIZE], rvu); if (ret) { dev_err(rvu->dev, "RVUAF: IRQ registration failed for RVUAFVF FLR0\n"); goto fail; } rvu->irq_allocated[offset] = true; offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1; sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1"); ret = request_irq(pci_irq_vector(rvu->pdev, offset), rvu_flr_intr_handler, 0, &rvu->irq_name[offset * NAME_SIZE], rvu); if (ret) { dev_err(rvu->dev, "RVUAF: IRQ registration failed for RVUAFVF FLR1\n"); goto fail; } rvu->irq_allocated[offset] = true; /* Register ME interrupt handler for AF's VFs */ offset = pf_vec_start + RVU_PF_INT_VEC_VFME0; sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0"); ret = request_irq(pci_irq_vector(rvu->pdev, offset), rvu_me_vf_intr_handler, 0, &rvu->irq_name[offset * NAME_SIZE], rvu); if (ret) { dev_err(rvu->dev, "RVUAF: IRQ registration failed for RVUAFVF ME0\n"); goto fail; } rvu->irq_allocated[offset] = true; offset = pf_vec_start + RVU_PF_INT_VEC_VFME1; sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1"); ret = request_irq(pci_irq_vector(rvu->pdev, offset), rvu_me_vf_intr_handler, 0, &rvu->irq_name[offset * NAME_SIZE], rvu); if (ret) { dev_err(rvu->dev, "RVUAF: IRQ registration failed for RVUAFVF ME1\n"); goto fail; } rvu->irq_allocated[offset] = true; ret = rvu_cpt_register_interrupts(rvu); if (ret) goto fail; return 0; fail: rvu_unregister_interrupts(rvu); return ret; } static void rvu_flr_wq_destroy(struct rvu *rvu) { if (rvu->flr_wq) { destroy_workqueue(rvu->flr_wq); rvu->flr_wq = NULL; } } static int rvu_flr_init(struct rvu *rvu) { int dev, num_devs; u64 cfg; int pf; /* Enable FLR for all PFs*/ for (pf = 0; pf < rvu->hw->total_pfs; pf++) { cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf), cfg | BIT_ULL(22)); } rvu->flr_wq = alloc_ordered_workqueue("rvu_afpf_flr", WQ_HIGHPRI | WQ_MEM_RECLAIM); if (!rvu->flr_wq) return -ENOMEM; num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev); rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs, sizeof(struct rvu_work), GFP_KERNEL); if (!rvu->flr_wrk) { destroy_workqueue(rvu->flr_wq); return -ENOMEM; } for (dev = 0; dev < num_devs; dev++) { rvu->flr_wrk[dev].rvu = rvu; INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler); } mutex_init(&rvu->flr_lock); return 0; } static void rvu_disable_afvf_intr(struct rvu *rvu) { int vfs = rvu->vfs; rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs)); rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs)); rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs)); if (vfs <= 64) return; rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); } static void rvu_enable_afvf_intr(struct rvu *rvu) { int vfs = rvu->vfs; /* Clear any pending interrupts and enable AF VF 
interrupts for * the first 64 VFs. */ /* Mbox */ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs)); rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs)); /* FLR */ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs)); rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs)); rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs)); /* Same for remaining VFs, if any. */ if (vfs <= 64) return; rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64)); rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64)); rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); } int rvu_get_num_lbk_chans(void) { struct pci_dev *pdev; void __iomem *base; int ret = -EIO; pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK, NULL); if (!pdev) goto err; base = pci_ioremap_bar(pdev, 0); if (!base) goto err_put; /* Read number of available LBK channels from LBK(0)_CONST register. */ ret = (readq(base + 0x10) >> 32) & 0xffff; iounmap(base); err_put: pci_dev_put(pdev); err: return ret; } static int rvu_enable_sriov(struct rvu *rvu) { struct pci_dev *pdev = rvu->pdev; int err, chans, vfs; if (!rvu_afvf_msix_vectors_num_ok(rvu)) { dev_warn(&pdev->dev, "Skipping SRIOV enablement since not enough IRQs are available\n"); return 0; } chans = rvu_get_num_lbk_chans(); if (chans < 0) return chans; vfs = pci_sriov_get_totalvfs(pdev); /* Limit VFs in case we have more VFs than LBK channels available. */ if (vfs > chans) vfs = chans; if (!vfs) return 0; /* LBK channel number 63 is used for switching packets between * CGX mapped VFs. Hence limit LBK pairs till 62 only. */ if (vfs > 62) vfs = 62; /* Save VFs number for reference in VF interrupts handlers. * Since interrupts might start arriving during SRIOV enablement * ordinary API cannot be used to get number of enabled VFs. */ rvu->vfs = vfs; err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs, rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler); if (err) return err; rvu_enable_afvf_intr(rvu); /* Make sure IRQs are enabled before SRIOV. */ mb(); err = pci_enable_sriov(pdev, vfs); if (err) { rvu_disable_afvf_intr(rvu); rvu_mbox_destroy(&rvu->afvf_wq_info); return err; } return 0; } static void rvu_disable_sriov(struct rvu *rvu) { rvu_disable_afvf_intr(rvu); rvu_mbox_destroy(&rvu->afvf_wq_info); pci_disable_sriov(rvu->pdev); } static void rvu_update_module_params(struct rvu *rvu) { const char *default_pfl_name = "default"; strscpy(rvu->mkex_pfl_name, mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN); strscpy(rvu->kpu_pfl_name, kpu_profile ? 
kpu_profile : default_pfl_name, KPU_NAME_LEN); } static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct device *dev = &pdev->dev; struct rvu *rvu; int err; rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL); if (!rvu) return -ENOMEM; rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL); if (!rvu->hw) { devm_kfree(dev, rvu); return -ENOMEM; } pci_set_drvdata(pdev, rvu); rvu->pdev = pdev; rvu->dev = &pdev->dev; err = pci_enable_device(pdev); if (err) { dev_err(dev, "Failed to enable PCI device\n"); goto err_freemem; } err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_err(dev, "PCI request regions failed 0x%x\n", err); goto err_disable_device; } err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (err) { dev_err(dev, "DMA mask config failed, abort\n"); goto err_release_regions; } pci_set_master(pdev); rvu->ptp = ptp_get(); if (IS_ERR(rvu->ptp)) { err = PTR_ERR(rvu->ptp); if (err) goto err_release_regions; rvu->ptp = NULL; } /* Map Admin function CSRs */ rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0); rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0); if (!rvu->afreg_base || !rvu->pfreg_base) { dev_err(dev, "Unable to map admin function CSRs, aborting\n"); err = -ENOMEM; goto err_put_ptp; } /* Store module params in rvu structure */ rvu_update_module_params(rvu); /* Check which blocks the HW supports */ rvu_check_block_implemented(rvu); rvu_reset_all_blocks(rvu); rvu_setup_hw_capabilities(rvu); err = rvu_setup_hw_resources(rvu); if (err) goto err_put_ptp; /* Init mailbox btw AF and PFs */ err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF, rvu->hw->total_pfs, rvu_afpf_mbox_handler, rvu_afpf_mbox_up_handler); if (err) { dev_err(dev, "%s: Failed to initialize mbox\n", __func__); goto err_hwsetup; } err = rvu_flr_init(rvu); if (err) { dev_err(dev, "%s: Failed to initialize flr\n", __func__); goto err_mbox; } err = rvu_register_interrupts(rvu); if (err) { dev_err(dev, "%s: Failed to register interrupts\n", __func__); goto err_flr; } err = rvu_register_dl(rvu); if (err) { dev_err(dev, "%s: Failed to register devlink\n", __func__); goto err_irq; } rvu_setup_rvum_blk_revid(rvu); /* Enable AF's VFs (if any) */ err = rvu_enable_sriov(rvu); if (err) { dev_err(dev, "%s: Failed to enable sriov\n", __func__); goto err_dl; } /* Initialize debugfs */ rvu_dbg_init(rvu); mutex_init(&rvu->rswitch.switch_lock); if (rvu->fwdata) ptp_start(rvu, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate, rvu->fwdata->ptp_ext_tstamp); return 0; err_dl: rvu_unregister_dl(rvu); err_irq: rvu_unregister_interrupts(rvu); err_flr: rvu_flr_wq_destroy(rvu); err_mbox: rvu_mbox_destroy(&rvu->afpf_wq_info); err_hwsetup: rvu_cgx_exit(rvu); rvu_fwdata_exit(rvu); rvu_mcs_exit(rvu); rvu_reset_all_blocks(rvu); rvu_free_hw_resources(rvu); rvu_clear_rvum_blk_revid(rvu); err_put_ptp: ptp_put(rvu->ptp); err_release_regions: pci_release_regions(pdev); err_disable_device: pci_disable_device(pdev); err_freemem: pci_set_drvdata(pdev, NULL); devm_kfree(&pdev->dev, rvu->hw); devm_kfree(dev, rvu); return err; } static void rvu_remove(struct pci_dev *pdev) { struct rvu *rvu = pci_get_drvdata(pdev); rvu_dbg_exit(rvu); rvu_unregister_dl(rvu); rvu_unregister_interrupts(rvu); rvu_flr_wq_destroy(rvu); rvu_cgx_exit(rvu); rvu_fwdata_exit(rvu); rvu_mcs_exit(rvu); rvu_mbox_destroy(&rvu->afpf_wq_info); rvu_disable_sriov(rvu); rvu_reset_all_blocks(rvu); rvu_free_hw_resources(rvu); rvu_clear_rvum_blk_revid(rvu); ptp_put(rvu->ptp); pci_release_regions(pdev); pci_disable_device(pdev); 
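/* Clear drvdata before explicitly freeing the devm-allocated rvu state */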
pci_set_drvdata(pdev, NULL); devm_kfree(&pdev->dev, rvu->hw); devm_kfree(&pdev->dev, rvu); } static struct pci_driver rvu_driver = { .name = DRV_NAME, .id_table = rvu_id_table, .probe = rvu_probe, .remove = rvu_remove, }; static int __init rvu_init_module(void) { int err; pr_info("%s: %s\n", DRV_NAME, DRV_STRING); err = pci_register_driver(&cgx_driver); if (err < 0) return err; err = pci_register_driver(&ptp_driver); if (err < 0) goto ptp_err; err = pci_register_driver(&mcs_driver); if (err < 0) goto mcs_err; err = pci_register_driver(&rvu_driver); if (err < 0) goto rvu_err; return 0; rvu_err: pci_unregister_driver(&mcs_driver); mcs_err: pci_unregister_driver(&ptp_driver); ptp_err: pci_unregister_driver(&cgx_driver); return err; } static void __exit rvu_cleanup_module(void) { pci_unregister_driver(&rvu_driver); pci_unregister_driver(&mcs_driver); pci_unregister_driver(&ptp_driver); pci_unregister_driver(&cgx_driver); } module_init(rvu_init_module); module_exit(rvu_cleanup_module);
linux-master
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
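The rvu.c interrupt paths above deliberately do almost nothing in hard-IRQ context: rvu_mbox_intr_handler() clears the interrupt, snapshots which PFs/VFs have pending mailbox messages, and rvu_queue_work() hands each of them to a per-device work item on the mailbox workqueue. Below is a minimal, self-contained sketch of that fan-out shape, assuming a made-up device count and a hard-coded pending bitmap; every sketch_* name is invented for illustration and nothing here is part of the driver itself.

// SPDX-License-Identifier: GPL-2.0
/*
 * Editorial sketch only, not driver code: defer per-device servicing
 * from an interrupt-style hot path to a workqueue, mirroring the shape
 * of rvu_queue_work() and the mbox_wrk[] work items above. The device
 * count and the pending bitmap used in sketch_init() are invented.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#define SKETCH_NDEVS	4	/* hypothetical device count */

struct sketch_work {
	struct work_struct work;
	int devid;
};

static struct workqueue_struct *sketch_wq;
static struct sketch_work sketch_wrk[SKETCH_NDEVS];

static void sketch_handler(struct work_struct *work)
{
	struct sketch_work *w = container_of(work, struct sketch_work, work);

	/* Process-context work: where mbox messages would actually be parsed */
	pr_info("sketch: servicing device %d\n", w->devid);
}

/* Would be called from the IRQ handler with a snapshot of pending devices */
static void sketch_queue_pending(unsigned long pending)
{
	int i;

	for (i = 0; i < SKETCH_NDEVS; i++)
		if (pending & BIT(i))
			queue_work(sketch_wq, &sketch_wrk[i].work);
}

static int __init sketch_init(void)
{
	int i;

	sketch_wq = alloc_workqueue("sketch_mbox", WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!sketch_wq)
		return -ENOMEM;

	for (i = 0; i < SKETCH_NDEVS; i++) {
		sketch_wrk[i].devid = i;
		INIT_WORK(&sketch_wrk[i].work, sketch_handler);
	}

	sketch_queue_pending(0x5);	/* pretend devices 0 and 2 interrupted */
	return 0;
}

static void __exit sketch_exit(void)
{
	destroy_workqueue(sketch_wq);	/* flushes any still-queued work */
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");

Keeping one work item per device, as the driver does with mbox_wrk[] and mbox_wrk_up[], serializes message handling for a given PF/VF while an unbound workqueue still lets independent devices be serviced concurrently.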
// SPDX-License-Identifier: GPL-2.0 /* Marvell CN10K MCS driver * * Copyright (C) 2022 Marvell. */ #include <linux/types.h> #include <linux/device.h> #include <linux/module.h> #include <linux/pci.h> #include "mcs.h" #include "rvu.h" #include "mcs_reg.h" #include "lmac_common.h" #define M(_name, _id, _fn_name, _req_type, _rsp_type) \ static struct _req_type __maybe_unused \ *otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \ { \ struct _req_type *req; \ \ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \ &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \ sizeof(struct _rsp_type)); \ if (!req) \ return NULL; \ req->hdr.sig = OTX2_MBOX_REQ_SIG; \ req->hdr.id = _id; \ return req; \ } MBOX_UP_MCS_MESSAGES #undef M void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena) { struct mcs *mcs; u64 cfg; u8 port; if (!rvu->mcs_blk_cnt) return; /* When ptp is enabled, RPM appends 8B header for all * RX packets. MCS PEX need to configure to skip 8B * during packet parsing. */ /* CNF10K-B */ if (rvu->mcs_blk_cnt > 1) { mcs = mcs_get_pdata(rpm_id); cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION); if (ena) cfg |= BIT_ULL(lmac_id); else cfg &= ~BIT_ULL(lmac_id); mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, cfg); return; } /* CN10KB */ mcs = mcs_get_pdata(0); port = (rpm_id * rvu->hw->lmac_per_cgx) + lmac_id; cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port)); if (ena) cfg |= BIT_ULL(0); else cfg &= ~BIT_ULL(0); mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port), cfg); } int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu, struct mcs_set_lmac_mode *req, struct msg_rsp *rsp) { struct mcs *mcs; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); if (BIT_ULL(req->lmac_id) & mcs->hw->lmac_bmap) mcs_set_lmac_mode(mcs, req->lmac_id, req->mode); return 0; } int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event) { struct mcs_intrq_entry *qentry; u16 pcifunc = event->pcifunc; struct rvu *rvu = mcs->rvu; struct mcs_pfvf *pfvf; /* Check if it is PF or VF */ if (pcifunc & RVU_PFVF_FUNC_MASK) pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)]; else pfvf = &mcs->pf[rvu_get_pf(pcifunc)]; event->intr_mask &= pfvf->intr_mask; /* Check PF/VF interrupt notification is enabled */ if (!(pfvf->intr_mask && event->intr_mask)) return 0; qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC); if (!qentry) return -ENOMEM; qentry->intr_event = *event; spin_lock(&rvu->mcs_intrq_lock); list_add_tail(&qentry->node, &rvu->mcs_intrq_head); spin_unlock(&rvu->mcs_intrq_lock); queue_work(rvu->mcs_intr_wq, &rvu->mcs_intr_work); return 0; } static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu) { struct mcs_intr_info *req; int err, pf; pf = rvu_get_pf(event->pcifunc); req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf); if (!req) return -ENOMEM; req->mcs_id = event->mcs_id; req->intr_mask = event->intr_mask; req->sa_id = event->sa_id; req->hdr.pcifunc = event->pcifunc; req->lmac_id = event->lmac_id; otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf); err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf); if (err) dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf); return 0; } static void mcs_intr_handler_task(struct work_struct *work) { struct rvu *rvu = container_of(work, struct rvu, mcs_intr_work); struct mcs_intrq_entry *qentry; struct mcs_intr_event *event; unsigned long flags; do { spin_lock_irqsave(&rvu->mcs_intrq_lock, flags); qentry = 
list_first_entry_or_null(&rvu->mcs_intrq_head, struct mcs_intrq_entry, node); if (qentry) list_del(&qentry->node); spin_unlock_irqrestore(&rvu->mcs_intrq_lock, flags); if (!qentry) break; /* nothing more to process */ event = &qentry->intr_event; mcs_notify_pfvf(event, rvu); kfree(qentry); } while (1); } int rvu_mbox_handler_mcs_intr_cfg(struct rvu *rvu, struct mcs_intr_cfg *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; struct mcs_pfvf *pfvf; struct mcs *mcs; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); /* Check if it is PF or VF */ if (pcifunc & RVU_PFVF_FUNC_MASK) pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)]; else pfvf = &mcs->pf[rvu_get_pf(pcifunc)]; mcs->pf_map[0] = pcifunc; pfvf->intr_mask = req->intr_mask; return 0; } int rvu_mbox_handler_mcs_get_hw_info(struct rvu *rvu, struct msg_req *req, struct mcs_hw_info *rsp) { struct mcs *mcs; if (!rvu->mcs_blk_cnt) return MCS_AF_ERR_NOT_MAPPED; /* MCS resources are same across all blocks */ mcs = mcs_get_pdata(0); rsp->num_mcs_blks = rvu->mcs_blk_cnt; rsp->tcam_entries = mcs->hw->tcam_entries; rsp->secy_entries = mcs->hw->secy_entries; rsp->sc_entries = mcs->hw->sc_entries; rsp->sa_entries = mcs->hw->sa_entries; return 0; } int rvu_mbox_handler_mcs_port_reset(struct rvu *rvu, struct mcs_port_reset_req *req, struct msg_rsp *rsp) { struct mcs *mcs; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); mcs_reset_port(mcs, req->port_id, req->reset); return 0; } int rvu_mbox_handler_mcs_clear_stats(struct rvu *rvu, struct mcs_clear_stats *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; struct mcs *mcs; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); mutex_lock(&mcs->stats_lock); if (req->all) mcs_clear_all_stats(mcs, pcifunc, req->dir); else mcs_clear_stats(mcs, req->type, req->id, req->dir); mutex_unlock(&mcs->stats_lock); return 0; } int rvu_mbox_handler_mcs_get_flowid_stats(struct rvu *rvu, struct mcs_stats_req *req, struct mcs_flowid_stats *rsp) { struct mcs *mcs; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); /* In CNF10K-B, before reading the statistics, * MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP needs to be set * to get accurate statistics */ if (mcs->hw->mcs_blks > 1) mcs_set_force_clk_en(mcs, true); mutex_lock(&mcs->stats_lock); mcs_get_flowid_stats(mcs, rsp, req->id, req->dir); mutex_unlock(&mcs->stats_lock); /* Clear MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP after reading * the statistics */ if (mcs->hw->mcs_blks > 1) mcs_set_force_clk_en(mcs, false); return 0; } int rvu_mbox_handler_mcs_get_secy_stats(struct rvu *rvu, struct mcs_stats_req *req, struct mcs_secy_stats *rsp) { struct mcs *mcs; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); if (mcs->hw->mcs_blks > 1) mcs_set_force_clk_en(mcs, true); mutex_lock(&mcs->stats_lock); if (req->dir == MCS_RX) mcs_get_rx_secy_stats(mcs, rsp, req->id); else mcs_get_tx_secy_stats(mcs, rsp, req->id); mutex_unlock(&mcs->stats_lock); if (mcs->hw->mcs_blks > 1) mcs_set_force_clk_en(mcs, false); return 0; } int rvu_mbox_handler_mcs_get_sc_stats(struct rvu *rvu, struct mcs_stats_req *req, struct mcs_sc_stats *rsp) { struct mcs *mcs; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); if (mcs->hw->mcs_blks > 1) mcs_set_force_clk_en(mcs, true); mutex_lock(&mcs->stats_lock); 
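/* stats_lock serializes access to the shared MCS statistics registers */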
mcs_get_sc_stats(mcs, rsp, req->id, req->dir); mutex_unlock(&mcs->stats_lock); if (mcs->hw->mcs_blks > 1) mcs_set_force_clk_en(mcs, false); return 0; } int rvu_mbox_handler_mcs_get_sa_stats(struct rvu *rvu, struct mcs_stats_req *req, struct mcs_sa_stats *rsp) { struct mcs *mcs; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); if (mcs->hw->mcs_blks > 1) mcs_set_force_clk_en(mcs, true); mutex_lock(&mcs->stats_lock); mcs_get_sa_stats(mcs, rsp, req->id, req->dir); mutex_unlock(&mcs->stats_lock); if (mcs->hw->mcs_blks > 1) mcs_set_force_clk_en(mcs, false); return 0; } int rvu_mbox_handler_mcs_get_port_stats(struct rvu *rvu, struct mcs_stats_req *req, struct mcs_port_stats *rsp) { struct mcs *mcs; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); if (mcs->hw->mcs_blks > 1) mcs_set_force_clk_en(mcs, true); mutex_lock(&mcs->stats_lock); mcs_get_port_stats(mcs, rsp, req->id, req->dir); mutex_unlock(&mcs->stats_lock); if (mcs->hw->mcs_blks > 1) mcs_set_force_clk_en(mcs, false); return 0; } int rvu_mbox_handler_mcs_set_active_lmac(struct rvu *rvu, struct mcs_set_active_lmac *req, struct msg_rsp *rsp) { struct mcs *mcs; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); if (!mcs) return MCS_AF_ERR_NOT_MAPPED; mcs->hw->lmac_bmap = req->lmac_bmap; mcs_set_lmac_channels(req->mcs_id, req->chan_base); return 0; } int rvu_mbox_handler_mcs_port_cfg_set(struct rvu *rvu, struct mcs_port_cfg_set_req *req, struct msg_rsp *rsp) { struct mcs *mcs; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id))) return -EINVAL; mcs_set_port_cfg(mcs, req); return 0; } int rvu_mbox_handler_mcs_port_cfg_get(struct rvu *rvu, struct mcs_port_cfg_get_req *req, struct mcs_port_cfg_get_rsp *rsp) { struct mcs *mcs; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id))) return -EINVAL; mcs_get_port_cfg(mcs, req, rsp); return 0; } int rvu_mbox_handler_mcs_custom_tag_cfg_get(struct rvu *rvu, struct mcs_custom_tag_cfg_get_req *req, struct mcs_custom_tag_cfg_get_rsp *rsp) { struct mcs *mcs; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); mcs_get_custom_tag_cfg(mcs, req, rsp); return 0; } int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc) { struct mcs *mcs; int mcs_id; /* CNF10K-B mcs0-6 are mapped to RPM2-8*/ if (rvu->mcs_blk_cnt > 1) { for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) { mcs = mcs_get_pdata(mcs_id); mcs_free_all_rsrc(mcs, MCS_RX, pcifunc); mcs_free_all_rsrc(mcs, MCS_TX, pcifunc); } } else { /* CN10K-B has only one mcs block */ mcs = mcs_get_pdata(0); mcs_free_all_rsrc(mcs, MCS_RX, pcifunc); mcs_free_all_rsrc(mcs, MCS_TX, pcifunc); } return 0; } int rvu_mbox_handler_mcs_flowid_ena_entry(struct rvu *rvu, struct mcs_flowid_ena_dis_entry *req, struct msg_rsp *rsp) { struct mcs *mcs; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); mcs_ena_dis_flowid_entry(mcs, req->flow_id, req->dir, req->ena); return 0; } int rvu_mbox_handler_mcs_pn_table_write(struct rvu *rvu, struct mcs_pn_table_write_req *req, struct msg_rsp *rsp) { struct mcs *mcs; if (req->mcs_id >= rvu->mcs_blk_cnt) return 
MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); mcs_pn_table_write(mcs, req->pn_id, req->next_pn, req->dir); return 0; } int rvu_mbox_handler_mcs_set_pn_threshold(struct rvu *rvu, struct mcs_set_pn_threshold *req, struct msg_rsp *rsp) { struct mcs *mcs; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); mcs_pn_threshold_set(mcs, req); return 0; } int rvu_mbox_handler_mcs_rx_sc_sa_map_write(struct rvu *rvu, struct mcs_rx_sc_sa_map *req, struct msg_rsp *rsp) { struct mcs *mcs; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); mcs->mcs_ops->mcs_rx_sa_mem_map_write(mcs, req); return 0; } int rvu_mbox_handler_mcs_tx_sc_sa_map_write(struct rvu *rvu, struct mcs_tx_sc_sa_map *req, struct msg_rsp *rsp) { struct mcs *mcs; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); mcs->mcs_ops->mcs_tx_sa_mem_map_write(mcs, req); mcs->tx_sa_active[req->sc_id] = req->tx_sa_active; return 0; } int rvu_mbox_handler_mcs_sa_plcy_write(struct rvu *rvu, struct mcs_sa_plcy_write_req *req, struct msg_rsp *rsp) { struct mcs *mcs; int i; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); for (i = 0; i < req->sa_cnt; i++) mcs_sa_plcy_write(mcs, &req->plcy[i][0], req->sa_index[i], req->dir); return 0; } int rvu_mbox_handler_mcs_rx_sc_cam_write(struct rvu *rvu, struct mcs_rx_sc_cam_write_req *req, struct msg_rsp *rsp) { struct mcs *mcs; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); mcs_rx_sc_cam_write(mcs, req->sci, req->secy_id, req->sc_id); return 0; } int rvu_mbox_handler_mcs_secy_plcy_write(struct rvu *rvu, struct mcs_secy_plcy_write_req *req, struct msg_rsp *rsp) { struct mcs *mcs; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); mcs_secy_plcy_write(mcs, req->plcy, req->secy_id, req->dir); return 0; } int rvu_mbox_handler_mcs_flowid_entry_write(struct rvu *rvu, struct mcs_flowid_entry_write_req *req, struct msg_rsp *rsp) { struct secy_mem_map map; struct mcs *mcs; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); /* TODO validate the flowid */ mcs_flowid_entry_write(mcs, req->data, req->mask, req->flow_id, req->dir); map.secy = req->secy_id; map.sc = req->sc_id; map.ctrl_pkt = req->ctrl_pkt; map.flow_id = req->flow_id; map.sci = req->sci; mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, req->dir); if (req->ena) mcs_ena_dis_flowid_entry(mcs, req->flow_id, req->dir, true); return 0; } int rvu_mbox_handler_mcs_free_resources(struct rvu *rvu, struct mcs_free_rsrc_req *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; struct mcs_rsrc_map *map; struct mcs *mcs; int rc = 0; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); if (req->dir == MCS_RX) map = &mcs->rx; else map = &mcs->tx; mutex_lock(&rvu->rsrc_lock); /* Free all the cam resources mapped to PF/VF */ if (req->all) { rc = mcs_free_all_rsrc(mcs, req->dir, pcifunc); goto exit; } switch (req->rsrc_type) { case MCS_RSRC_TYPE_FLOWID: rc = mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map, req->rsrc_id, pcifunc); mcs_ena_dis_flowid_entry(mcs, req->rsrc_id, req->dir, false); break; case MCS_RSRC_TYPE_SECY: rc = mcs_free_rsrc(&map->secy, map->secy2pf_map, req->rsrc_id, pcifunc); mcs_clear_secy_plcy(mcs, req->rsrc_id, req->dir); 
break; case MCS_RSRC_TYPE_SC: rc = mcs_free_rsrc(&map->sc, map->sc2pf_map, req->rsrc_id, pcifunc); /* Disable SC CAM only on RX side */ if (req->dir == MCS_RX) mcs_ena_dis_sc_cam_entry(mcs, req->rsrc_id, false); break; case MCS_RSRC_TYPE_SA: rc = mcs_free_rsrc(&map->sa, map->sa2pf_map, req->rsrc_id, pcifunc); break; } exit: mutex_unlock(&rvu->rsrc_lock); return rc; } int rvu_mbox_handler_mcs_alloc_resources(struct rvu *rvu, struct mcs_alloc_rsrc_req *req, struct mcs_alloc_rsrc_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; struct mcs_rsrc_map *map; struct mcs *mcs; int rsrc_id, i; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); if (req->dir == MCS_RX) map = &mcs->rx; else map = &mcs->tx; mutex_lock(&rvu->rsrc_lock); if (req->all) { rsrc_id = mcs_alloc_all_rsrc(mcs, &rsp->flow_ids[0], &rsp->secy_ids[0], &rsp->sc_ids[0], &rsp->sa_ids[0], &rsp->sa_ids[1], pcifunc, req->dir); goto exit; } switch (req->rsrc_type) { case MCS_RSRC_TYPE_FLOWID: for (i = 0; i < req->rsrc_cnt; i++) { rsrc_id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc); if (rsrc_id < 0) goto exit; rsp->flow_ids[i] = rsrc_id; rsp->rsrc_cnt++; } break; case MCS_RSRC_TYPE_SECY: for (i = 0; i < req->rsrc_cnt; i++) { rsrc_id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc); if (rsrc_id < 0) goto exit; rsp->secy_ids[i] = rsrc_id; rsp->rsrc_cnt++; } break; case MCS_RSRC_TYPE_SC: for (i = 0; i < req->rsrc_cnt; i++) { rsrc_id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc); if (rsrc_id < 0) goto exit; rsp->sc_ids[i] = rsrc_id; rsp->rsrc_cnt++; } break; case MCS_RSRC_TYPE_SA: for (i = 0; i < req->rsrc_cnt; i++) { rsrc_id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc); if (rsrc_id < 0) goto exit; rsp->sa_ids[i] = rsrc_id; rsp->rsrc_cnt++; } break; } rsp->rsrc_type = req->rsrc_type; rsp->dir = req->dir; rsp->mcs_id = req->mcs_id; rsp->all = req->all; exit: if (rsrc_id < 0) dev_err(rvu->dev, "Failed to allocate the mcs resources for PCIFUNC:%d\n", pcifunc); mutex_unlock(&rvu->rsrc_lock); return 0; } int rvu_mbox_handler_mcs_alloc_ctrl_pkt_rule(struct rvu *rvu, struct mcs_alloc_ctrl_pkt_rule_req *req, struct mcs_alloc_ctrl_pkt_rule_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; struct mcs_rsrc_map *map; struct mcs *mcs; int rsrc_id; u16 offset; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); map = (req->dir == MCS_RX) ? 
&mcs->rx : &mcs->tx; mutex_lock(&rvu->rsrc_lock); switch (req->rule_type) { case MCS_CTRL_PKT_RULE_TYPE_ETH: offset = MCS_CTRLPKT_ETYPE_RULE_OFFSET; break; case MCS_CTRL_PKT_RULE_TYPE_DA: offset = MCS_CTRLPKT_DA_RULE_OFFSET; break; case MCS_CTRL_PKT_RULE_TYPE_RANGE: offset = MCS_CTRLPKT_DA_RANGE_RULE_OFFSET; break; case MCS_CTRL_PKT_RULE_TYPE_COMBO: offset = MCS_CTRLPKT_COMBO_RULE_OFFSET; break; case MCS_CTRL_PKT_RULE_TYPE_MAC: offset = MCS_CTRLPKT_MAC_EN_RULE_OFFSET; break; } rsrc_id = mcs_alloc_ctrlpktrule(&map->ctrlpktrule, map->ctrlpktrule2pf_map, offset, pcifunc); if (rsrc_id < 0) goto exit; rsp->rule_idx = rsrc_id; rsp->rule_type = req->rule_type; rsp->dir = req->dir; rsp->mcs_id = req->mcs_id; mutex_unlock(&rvu->rsrc_lock); return 0; exit: if (rsrc_id < 0) dev_err(rvu->dev, "Failed to allocate the mcs ctrl pkt rule for PCIFUNC:%d\n", pcifunc); mutex_unlock(&rvu->rsrc_lock); return rsrc_id; } int rvu_mbox_handler_mcs_free_ctrl_pkt_rule(struct rvu *rvu, struct mcs_free_ctrl_pkt_rule_req *req, struct msg_rsp *rsp) { struct mcs *mcs; int rc; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); mutex_lock(&rvu->rsrc_lock); rc = mcs_free_ctrlpktrule(mcs, req); mutex_unlock(&rvu->rsrc_lock); return rc; } int rvu_mbox_handler_mcs_ctrl_pkt_rule_write(struct rvu *rvu, struct mcs_ctrl_pkt_rule_write_req *req, struct msg_rsp *rsp) { struct mcs *mcs; int rc; if (req->mcs_id >= rvu->mcs_blk_cnt) return MCS_AF_ERR_INVALID_MCSID; mcs = mcs_get_pdata(req->mcs_id); rc = mcs_ctrlpktrule_write(mcs, req); return rc; } static void rvu_mcs_set_lmac_bmap(struct rvu *rvu) { struct mcs *mcs = mcs_get_pdata(0); unsigned long lmac_bmap; int cgx, lmac, port; for (port = 0; port < mcs->hw->lmac_cnt; port++) { cgx = port / rvu->hw->lmac_per_cgx; lmac = port % rvu->hw->lmac_per_cgx; if (!is_lmac_valid(rvu_cgx_pdata(cgx, rvu), lmac)) continue; set_bit(port, &lmac_bmap); } mcs->hw->lmac_bmap = lmac_bmap; } int rvu_mcs_init(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; int lmac, err = 0, mcs_id; struct mcs *mcs; rvu->mcs_blk_cnt = mcs_get_blkcnt(); if (!rvu->mcs_blk_cnt) return 0; /* Needed only for CN10K-B */ if (rvu->mcs_blk_cnt == 1) { err = mcs_set_lmac_channels(0, hw->cgx_chan_base); if (err) return err; /* Set active lmacs */ rvu_mcs_set_lmac_bmap(rvu); } /* Install default tcam bypass entry and set port to operational mode */ for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) { mcs = mcs_get_pdata(mcs_id); mcs_install_flowid_bypass_entry(mcs); for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) mcs_set_lmac_mode(mcs, lmac, 0); mcs->rvu = rvu; /* Allocated memory for PFVF data */ mcs->pf = devm_kcalloc(mcs->dev, hw->total_pfs, sizeof(struct mcs_pfvf), GFP_KERNEL); if (!mcs->pf) return -ENOMEM; mcs->vf = devm_kcalloc(mcs->dev, hw->total_vfs, sizeof(struct mcs_pfvf), GFP_KERNEL); if (!mcs->vf) return -ENOMEM; } /* Initialize the wq for handling mcs interrupts */ INIT_LIST_HEAD(&rvu->mcs_intrq_head); INIT_WORK(&rvu->mcs_intr_work, mcs_intr_handler_task); rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", 0, 0); if (!rvu->mcs_intr_wq) { dev_err(rvu->dev, "mcs alloc workqueue failed\n"); return -ENOMEM; } return err; } void rvu_mcs_exit(struct rvu *rvu) { if (!rvu->mcs_intr_wq) return; flush_workqueue(rvu->mcs_intr_wq); destroy_workqueue(rvu->mcs_intr_wq); rvu->mcs_intr_wq = NULL; }
linux-master
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
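mcs_rvu_if.c defers its interrupt notifications in a similar two-stage way: mcs_add_intr_wq_entry() allocates a queue entry with GFP_ATOMIC, appends it to rvu->mcs_intrq_head under mcs_intrq_lock and queues mcs_intr_work, and mcs_intr_handler_task() later drains that list one entry at a time, forwarding each event to the PF/VF over the up mailbox. The sketch below shows only that producer/consumer shape, under the assumption of an invented event payload and sketch_* naming; where the real task calls mcs_notify_pfvf(), the sketch merely logs.

// SPDX-License-Identifier: GPL-2.0
/*
 * Editorial sketch only, not driver code: an interrupt-context producer
 * queues events onto a spinlock-protected list and a work item drains
 * them, the same shape as mcs_add_intr_wq_entry() feeding
 * mcs_intr_handler_task(). The event payload and names are invented,
 * and the drain step logs where the driver sends an up-mbox message.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct sketch_event {
	struct list_head node;
	u32 intr_mask;			/* hypothetical payload */
};

static LIST_HEAD(sketch_evq);
static DEFINE_SPINLOCK(sketch_evq_lock);
static struct workqueue_struct *sketch_wq;
static struct work_struct sketch_drain_work;

/* Producer: callable from interrupt context, hence GFP_ATOMIC */
static int sketch_post_event(u32 intr_mask)
{
	struct sketch_event *ev = kmalloc(sizeof(*ev), GFP_ATOMIC);

	if (!ev)
		return -ENOMEM;
	ev->intr_mask = intr_mask;

	spin_lock(&sketch_evq_lock);
	list_add_tail(&ev->node, &sketch_evq);
	spin_unlock(&sketch_evq_lock);

	queue_work(sketch_wq, &sketch_drain_work);
	return 0;
}

/* Consumer: detach one entry at a time so the lock is held only briefly */
static void sketch_drain(struct work_struct *work)
{
	struct sketch_event *ev;
	unsigned long flags;

	for (;;) {
		spin_lock_irqsave(&sketch_evq_lock, flags);
		ev = list_first_entry_or_null(&sketch_evq,
					      struct sketch_event, node);
		if (ev)
			list_del(&ev->node);
		spin_unlock_irqrestore(&sketch_evq_lock, flags);

		if (!ev)
			break;
		pr_info("sketch: event mask 0x%x\n", ev->intr_mask);
		kfree(ev);
	}
}

static int __init sketch_init(void)
{
	int err;

	sketch_wq = alloc_workqueue("sketch_mcs_intr", 0, 0);
	if (!sketch_wq)
		return -ENOMEM;
	INIT_WORK(&sketch_drain_work, sketch_drain);

	err = sketch_post_event(0x3);
	if (err)
		destroy_workqueue(sketch_wq);
	return err;
}

static void __exit sketch_exit(void)
{
	destroy_workqueue(sketch_wq);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");

Draining one entry at a time under spin_lock_irqsave() mirrors the driver: the list is locked only long enough to detach an entry, so the interrupt-context producer is never held off for the duration of a mailbox round trip.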
// SPDX-License-Identifier: GPL-2.0 /* Marvell RVU Admin Function driver * * Copyright (C) 2018 Marvell. * */ #include <linux/module.h> #include <linux/pci.h> #include "rvu_struct.h" #include "rvu_reg.h" #include "rvu.h" #include "npc.h" #include "cgx.h" #include "lmac_common.h" #include "rvu_npc_hash.h" static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc); static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, int type, int chan_id); static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, int type, bool add); static int nix_setup_ipolicers(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr); static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw); static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, struct nix_hw *nix_hw, u16 pcifunc); static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc); static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, u32 leaf_prof); static const char *nix_get_ctx_name(int ctype); enum mc_tbl_sz { MC_TBL_SZ_256, MC_TBL_SZ_512, MC_TBL_SZ_1K, MC_TBL_SZ_2K, MC_TBL_SZ_4K, MC_TBL_SZ_8K, MC_TBL_SZ_16K, MC_TBL_SZ_32K, MC_TBL_SZ_64K, }; enum mc_buf_cnt { MC_BUF_CNT_8, MC_BUF_CNT_16, MC_BUF_CNT_32, MC_BUF_CNT_64, MC_BUF_CNT_128, MC_BUF_CNT_256, MC_BUF_CNT_512, MC_BUF_CNT_1024, MC_BUF_CNT_2048, }; enum nix_makr_fmt_indexes { NIX_MARK_CFG_IP_DSCP_RED, NIX_MARK_CFG_IP_DSCP_YELLOW, NIX_MARK_CFG_IP_DSCP_YELLOW_RED, NIX_MARK_CFG_IP_ECN_RED, NIX_MARK_CFG_IP_ECN_YELLOW, NIX_MARK_CFG_IP_ECN_YELLOW_RED, NIX_MARK_CFG_VLAN_DEI_RED, NIX_MARK_CFG_VLAN_DEI_YELLOW, NIX_MARK_CFG_VLAN_DEI_YELLOW_RED, NIX_MARK_CFG_MAX, }; /* For now considering MC resources needed for broadcast * pkt replication only. i.e 256 HWVFs + 12 PFs. */ #define MC_TBL_SIZE MC_TBL_SZ_512 #define MC_BUF_CNT MC_BUF_CNT_128 struct mce { struct hlist_node node; u16 pcifunc; }; int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr) { int i = 0; /*If blkaddr is 0, return the first nix block address*/ if (blkaddr == 0) return rvu->nix_blkaddr[blkaddr]; while (i + 1 < MAX_NIX_BLKS) { if (rvu->nix_blkaddr[i] == blkaddr) return rvu->nix_blkaddr[i + 1]; i++; } return 0; } bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (!pfvf->nixlf || blkaddr < 0) return false; return true; } int rvu_get_nixlf_count(struct rvu *rvu) { int blkaddr = 0, max = 0; struct rvu_block *block; blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); while (blkaddr) { block = &rvu->hw->block[blkaddr]; max += block->lf.max; blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); } return max; } int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct rvu_hwinfo *hw = rvu->hw; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (!pfvf->nixlf || blkaddr < 0) return NIX_AF_ERR_AF_LF_INVALID; *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); if (*nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; if (nix_blkaddr) *nix_blkaddr = blkaddr; return 0; } int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc, struct nix_hw **nix_hw, int *blkaddr) { struct rvu_pfvf *pfvf; pfvf = rvu_get_pfvf(rvu, pcifunc); *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (!pfvf->nixlf || *blkaddr < 0) return NIX_AF_ERR_AF_LF_INVALID; *nix_hw = get_nix_hw(rvu->hw, *blkaddr); if (!*nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; return 0; } static void 
nix_mce_list_init(struct nix_mce_list *list, int max) { INIT_HLIST_HEAD(&list->head); list->count = 0; list->max = max; } static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count) { int idx; if (!mcast) return 0; idx = mcast->next_free_mce; mcast->next_free_mce += count; return idx; } struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) { int nix_blkaddr = 0, i = 0; struct rvu *rvu = hw->rvu; nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); while (nix_blkaddr) { if (blkaddr == nix_blkaddr && hw->nix) return &hw->nix[i]; nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); i++; } return NULL; } int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type) { if (hw->cap.nix_multiple_dwrr_mtu) return NIX_AF_DWRR_MTUX(smq_link_type); if (smq_link_type == SMQ_LINK_TYPE_SDP) return NIX_AF_DWRR_SDP_MTU; /* Here it's same reg for RPM and LBK */ return NIX_AF_DWRR_RPM_MTU; } u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu) { dwrr_mtu &= 0x1FULL; /* MTU used for DWRR calculation is in power of 2 up until 64K bytes. * Value of 4 is reserved for MTU value of 9728 bytes. * Value of 5 is reserved for MTU value of 10240 bytes. */ switch (dwrr_mtu) { case 4: return 9728; case 5: return 10240; default: return BIT_ULL(dwrr_mtu); } return 0; } u32 convert_bytes_to_dwrr_mtu(u32 bytes) { /* MTU used for DWRR calculation is in power of 2 up until 64K bytes. * Value of 4 is reserved for MTU value of 9728 bytes. * Value of 5 is reserved for MTU value of 10240 bytes. */ if (bytes > BIT_ULL(16)) return 0; switch (bytes) { case 9728: return 4; case 10240: return 5; default: return ilog2(bytes); } return 0; } static void nix_rx_sync(struct rvu *rvu, int blkaddr) { int err; /* Sync all in flight RX packets to LLC/DRAM */ rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); if (err) dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n"); /* SW_SYNC ensures all existing transactions are finished and pkts * are written to LLC/DRAM, queues should be teared down after * successful SW_SYNC. Due to a HW errata, in some rare scenarios * an existing transaction might end after SW_SYNC operation. To * ensure operation is fully done, do the SW_SYNC twice. 
*/ rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); if (err) dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n"); } static bool is_valid_txschq(struct rvu *rvu, int blkaddr, int lvl, u16 pcifunc, u16 schq) { struct rvu_hwinfo *hw = rvu->hw; struct nix_txsch *txsch; struct nix_hw *nix_hw; u16 map_func; nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) return false; txsch = &nix_hw->txsch[lvl]; /* Check out of bounds */ if (schq >= txsch->schq.max) return false; mutex_lock(&rvu->rsrc_lock); map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); mutex_unlock(&rvu->rsrc_lock); /* TLs aggegating traffic are shared across PF and VFs */ if (lvl >= hw->cap.nix_tx_aggr_lvl) { if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc)) return false; else return true; } if (map_func != pcifunc) return false; return true; } static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf, struct nix_lf_alloc_rsp *rsp, bool loop) { struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc); u16 req_chan_base, req_chan_end, req_chan_cnt; struct rvu_hwinfo *hw = rvu->hw; struct sdp_node_info *sdp_info; int pkind, pf, vf, lbkid, vfid; u8 cgx_id, lmac_id; bool from_vf; int err; pf = rvu_get_pf(pcifunc); if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && type != NIX_INTF_TYPE_SDP) return 0; switch (type) { case NIX_INTF_TYPE_CGX: pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf]; rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); pkind = rvu_npc_get_pkind(rvu, pf); if (pkind < 0) { dev_err(rvu->dev, "PF_Func 0x%x: Invalid pkind\n", pcifunc); return -EINVAL; } pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0); pfvf->tx_chan_base = pfvf->rx_chan_base; pfvf->rx_chan_cnt = 1; pfvf->tx_chan_cnt = 1; rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id; cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind); rvu_npc_set_pkind(rvu, pkind, pfvf); break; case NIX_INTF_TYPE_LBK: vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; /* If NIX1 block is present on the silicon then NIXes are * assigned alternatively for lbk interfaces. NIX0 should * send packets on lbk link 1 channels and NIX1 should send * on lbk link 0 channels for the communication between * NIX0 and NIX1. */ lbkid = 0; if (rvu->hw->lbk_links > 1) lbkid = vf & 0x1 ? 0 : 1; /* By default NIX0 is configured to send packet on lbk link 1 * (which corresponds to LBK1), same packet will receive on * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0 * (which corresponds to LBK2) packet will receive on NIX0 lbk * link 1. * But if lbk links for NIX0 and NIX1 are negated, i.e NIX0 * transmits and receives on lbk link 0, whick corresponds * to LBK1 block, back to back connectivity between NIX and * LBK can be achieved (which is similar to 96xx) * * RX TX * NIX0 lbk link 1 (LBK2) 1 (LBK1) * NIX0 lbk link 0 (LBK0) 0 (LBK0) * NIX1 lbk link 0 (LBK1) 0 (LBK2) * NIX1 lbk link 1 (LBK3) 1 (LBK3) */ if (loop) lbkid = !lbkid; /* Note that AF's VFs work in pairs and talk over consecutive * loopback channels.Therefore if odd number of AF VFs are * enabled then the last VF remains with no pair. */ pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf); pfvf->tx_chan_base = vf & 0x1 ? 
rvu_nix_chan_lbk(rvu, lbkid, vf - 1) : rvu_nix_chan_lbk(rvu, lbkid, vf + 1); pfvf->rx_chan_cnt = 1; pfvf->tx_chan_cnt = 1; rsp->tx_link = hw->cgx_links + lbkid; pfvf->lbkid = lbkid; rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf); rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base, pfvf->rx_chan_cnt); break; case NIX_INTF_TYPE_SDP: from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); parent_pf = &rvu->pf[rvu_get_pf(pcifunc)]; sdp_info = parent_pf->sdp_info; if (!sdp_info) { dev_err(rvu->dev, "Invalid sdp_info pointer\n"); return -EINVAL; } if (from_vf) { req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn + sdp_info->num_pf_rings; vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; for (vfid = 0; vfid < vf; vfid++) req_chan_base += sdp_info->vf_rings[vfid]; req_chan_cnt = sdp_info->vf_rings[vf]; req_chan_end = req_chan_base + req_chan_cnt - 1; if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) || req_chan_end > rvu_nix_chan_sdp(rvu, 255)) { dev_err(rvu->dev, "PF_Func 0x%x: Invalid channel base and count\n", pcifunc); return -EINVAL; } } else { req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn; req_chan_cnt = sdp_info->num_pf_rings; } pfvf->rx_chan_base = req_chan_base; pfvf->rx_chan_cnt = req_chan_cnt; pfvf->tx_chan_base = pfvf->rx_chan_base; pfvf->tx_chan_cnt = pfvf->rx_chan_cnt; rsp->tx_link = hw->cgx_links + hw->lbk_links; rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base, pfvf->rx_chan_cnt); break; } /* Add a UCAST forwarding rule in MCAM with this NIXLF attached * RVU PF/VF's MAC address. */ rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base, pfvf->mac_addr); /* Add this PF_FUNC to bcast pkt replication list */ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true); if (err) { dev_err(rvu->dev, "Bcast list, failed to enable PF_FUNC 0x%x\n", pcifunc); return err; } /* Install MCAM rule matching Ethernet broadcast mac address */ rvu_npc_install_bcast_match_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base); pfvf->maxlen = NIC_HW_MIN_FRS; pfvf->minlen = NIC_HW_MIN_FRS; return 0; } static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); int err; pfvf->maxlen = 0; pfvf->minlen = 0; /* Remove this PF_FUNC from bcast pkt replication list */ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false); if (err) { dev_err(rvu->dev, "Bcast list, failed to disable PF_FUNC 0x%x\n", pcifunc); } /* Free and disable any MCAM entries used by this NIX LF */ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); /* Disable DMAC filters used */ rvu_cgx_disable_dmac_entries(rvu, pcifunc); } int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, struct nix_bp_cfg_req *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; int blkaddr, pf, type; u16 chan_base, chan; u64 cfg; pf = rvu_get_pf(pcifunc); type = is_afvf(pcifunc) ? 
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) return 0; pfvf = rvu_get_pfvf(rvu, pcifunc); blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); chan_base = pfvf->rx_chan_base + req->chan_base; for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), cfg & ~BIT_ULL(16)); } return 0; } static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, int type, int chan_id) { int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt; u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt; struct rvu_hwinfo *hw = rvu->hw; struct rvu_pfvf *pfvf; u8 cgx_id, lmac_id; u64 cfg; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); lmac_chan_cnt = cfg & 0xFF; cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt; lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF); cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); sdp_chan_cnt = cfg & 0xFFF; sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt; pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); /* Backpressure IDs range division * CGX channles are mapped to (0 - 191) BPIDs * LBK channles are mapped to (192 - 255) BPIDs * SDP channles are mapped to (256 - 511) BPIDs * * Lmac channles and bpids mapped as follows * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15) * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) .... * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) .... */ switch (type) { case NIX_INTF_TYPE_CGX: if ((req->chan_base + req->chan_cnt) > 16) return -EINVAL; rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); /* Assign bpid based on cgx, lmac and chan id */ bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) + (lmac_id * lmac_chan_cnt) + req->chan_base; if (req->bpid_per_chan) bpid += chan_id; if (bpid > cgx_bpid_cnt) return -EINVAL; break; case NIX_INTF_TYPE_LBK: if ((req->chan_base + req->chan_cnt) > 63) return -EINVAL; bpid = cgx_bpid_cnt + req->chan_base; if (req->bpid_per_chan) bpid += chan_id; if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt)) return -EINVAL; break; case NIX_INTF_TYPE_SDP: if ((req->chan_base + req->chan_cnt) > 255) return -EINVAL; bpid = sdp_bpid_cnt + req->chan_base; if (req->bpid_per_chan) bpid += chan_id; if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt)) return -EINVAL; break; default: return -EINVAL; } return bpid; } int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, struct nix_bp_cfg_req *req, struct nix_bp_cfg_rsp *rsp) { int blkaddr, pf, type, chan_id = 0; u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; u16 chan_base, chan; s16 bpid, bpid_base; u64 cfg; pf = rvu_get_pf(pcifunc); type = is_afvf(pcifunc) ? 
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; if (is_sdp_pfvf(pcifunc)) type = NIX_INTF_TYPE_SDP; /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && type != NIX_INTF_TYPE_SDP) return 0; pfvf = rvu_get_pfvf(rvu, pcifunc); blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id); chan_base = pfvf->rx_chan_base + req->chan_base; bpid = bpid_base; for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { if (bpid < 0) { dev_warn(rvu->dev, "Fail to enable backpressure\n"); return -EINVAL; } cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); cfg &= ~GENMASK_ULL(8, 0); rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16)); chan_id++; bpid = rvu_nix_get_bpid(rvu, req, type, chan_id); } for (chan = 0; chan < req->chan_cnt; chan++) { /* Map channel and bpid assign to it */ rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 | (bpid_base & 0x3FF); if (req->bpid_per_chan) bpid_base++; } rsp->chan_cnt = req->chan_cnt; return 0; } static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr, u64 format, bool v4, u64 *fidx) { struct nix_lso_format field = {0}; /* IP's Length field */ field.layer = NIX_TXLAYER_OL3; /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */ field.offset = v4 ? 2 : 4; field.sizem1 = 1; /* i.e 2 bytes */ field.alg = NIX_LSOALG_ADD_PAYLEN; rvu_write64(rvu, blkaddr, NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), *(u64 *)&field); /* No ID field in IPv6 header */ if (!v4) return; /* IP's ID field */ field.layer = NIX_TXLAYER_OL3; field.offset = 4; field.sizem1 = 1; /* i.e 2 bytes */ field.alg = NIX_LSOALG_ADD_SEGNUM; rvu_write64(rvu, blkaddr, NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), *(u64 *)&field); } static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr, u64 format, u64 *fidx) { struct nix_lso_format field = {0}; /* TCP's sequence number field */ field.layer = NIX_TXLAYER_OL4; field.offset = 4; field.sizem1 = 3; /* i.e 4 bytes */ field.alg = NIX_LSOALG_ADD_OFFSET; rvu_write64(rvu, blkaddr, NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), *(u64 *)&field); /* TCP's flags field */ field.layer = NIX_TXLAYER_OL4; field.offset = 12; field.sizem1 = 1; /* 2 bytes */ field.alg = NIX_LSOALG_TCP_FLAGS; rvu_write64(rvu, blkaddr, NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), *(u64 *)&field); } static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) { u64 cfg, idx, fidx = 0; /* Get max HW supported format indices */ cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF; nix_hw->lso.total = cfg; /* Enable LSO */ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG); /* For TSO, set first and middle segment flags to * mask out PSH, RST & FIN flags in TCP packet */ cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16)); cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16); rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63)); /* Setup default static LSO formats * * Configure format fields for TCPv4 segmentation offload */ idx = NIX_LSO_FORMAT_IDX_TSOV4; nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx); nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); /* Set rest of the fields to NOP */ for (; fidx < 8; fidx++) { rvu_write64(rvu, blkaddr, NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); } nix_hw->lso.in_use++; /* Configure format fields for TCPv6 segmentation offload */ idx = NIX_LSO_FORMAT_IDX_TSOV6; fidx = 0; nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, 
&fidx); nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); /* Set rest of the fields to NOP */ for (; fidx < 8; fidx++) { rvu_write64(rvu, blkaddr, NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); } nix_hw->lso.in_use++; } static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) { kfree(pfvf->rq_bmap); kfree(pfvf->sq_bmap); kfree(pfvf->cq_bmap); if (pfvf->rq_ctx) qmem_free(rvu->dev, pfvf->rq_ctx); if (pfvf->sq_ctx) qmem_free(rvu->dev, pfvf->sq_ctx); if (pfvf->cq_ctx) qmem_free(rvu->dev, pfvf->cq_ctx); if (pfvf->rss_ctx) qmem_free(rvu->dev, pfvf->rss_ctx); if (pfvf->nix_qints_ctx) qmem_free(rvu->dev, pfvf->nix_qints_ctx); if (pfvf->cq_ints_ctx) qmem_free(rvu->dev, pfvf->cq_ints_ctx); pfvf->rq_bmap = NULL; pfvf->cq_bmap = NULL; pfvf->sq_bmap = NULL; pfvf->rq_ctx = NULL; pfvf->sq_ctx = NULL; pfvf->cq_ctx = NULL; pfvf->rss_ctx = NULL; pfvf->nix_qints_ctx = NULL; pfvf->cq_ints_ctx = NULL; } static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, struct rvu_pfvf *pfvf, int nixlf, int rss_sz, int rss_grps, int hwctx_size, u64 way_mask, bool tag_lsb_as_adder) { int err, grp, num_indices; u64 val; /* RSS is not requested for this NIXLF */ if (!rss_sz) return 0; num_indices = rss_sz * rss_grps; /* Alloc NIX RSS HW context memory and config the base */ err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size); if (err) return err; rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf), (u64)pfvf->rss_ctx->iova); /* Config full RSS table size, enable RSS and caching */ val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 | ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE); if (tag_lsb_as_adder) val |= BIT_ULL(5); rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val); /* Config RSS group offset and sizes */ for (grp = 0; grp < rss_grps; grp++) rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp), ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp)); return 0; } static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, struct nix_aq_inst_s *inst) { struct admin_queue *aq = block->aq; struct nix_aq_res_s *result; int timeout = 1000; u64 reg, head; int ret; result = (struct nix_aq_res_s *)aq->res->base; /* Get current head pointer where to append this instruction */ reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS); head = (reg >> 4) & AQ_PTR_MASK; memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)), (void *)inst, aq->inst->entry_sz); memset(result, 0, sizeof(*result)); /* sync into memory */ wmb(); /* Ring the doorbell and wait for result */ rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1); while (result->compcode == NIX_AQ_COMP_NOTDONE) { cpu_relax(); udelay(1); timeout--; if (!timeout) return -EBUSY; } if (result->compcode != NIX_AQ_COMP_GOOD) { /* TODO: Replace this with some error code */ if (result->compcode == NIX_AQ_COMP_CTX_FAULT || result->compcode == NIX_AQ_COMP_LOCKERR || result->compcode == NIX_AQ_COMP_CTX_POISON) { ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX); ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX); ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX); ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX); if (ret) dev_err(rvu->dev, "%s: Not able to unlock cachelines\n", __func__); } return -EBUSY; } return 0; } static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req, u16 *smq, u16 *smq_mask) { struct nix_cn10k_aq_enq_req *aq_req; if (!is_rvu_otx2(rvu)) { aq_req = (struct nix_cn10k_aq_enq_req *)req; *smq = aq_req->sq.smq; *smq_mask = aq_req->sq_mask.smq; } else { *smq = req->sq.smq; 
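/* Editorial sketch (not driver code): nix_get_aq_req_smq() exists because the
 * CN10K mailbox request layout keeps sq.smq at a different offset than the
 * OcteonTx2 one, so callers must not read req->sq.smq directly on CN10K.
 * The AQ-enqueue path below uses it roughly as in this hedged, simplified
 * example, where the surrounding variable names are assumed from that caller:
 *
 *	u16 smq, smq_mask;
 *
 *	nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
 *	if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena && smq_mask &&
 *	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ, pcifunc, smq))
 *		return NIX_AF_ERR_AQ_ENQUEUE;
 */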
*smq_mask = req->sq_mask.smq; } } static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, struct nix_aq_enq_req *req, struct nix_aq_enq_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; int nixlf, blkaddr, rc = 0; struct nix_aq_inst_s inst; struct rvu_block *block; struct admin_queue *aq; struct rvu_pfvf *pfvf; u16 smq, smq_mask; void *ctx, *mask; bool ena; u64 cfg; blkaddr = nix_hw->blkaddr; block = &hw->block[blkaddr]; aq = block->aq; if (!aq) { dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__); return NIX_AF_ERR_AQ_ENQUEUE; } pfvf = rvu_get_pfvf(rvu, pcifunc); nixlf = rvu_get_lf(rvu, block, pcifunc, 0); /* Skip NIXLF check for broadcast MCE entry and bandwidth profile * operations done by AF itself. */ if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) || (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) { if (!pfvf->nixlf || nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; } switch (req->ctype) { case NIX_AQ_CTYPE_RQ: /* Check if index exceeds max no of queues */ if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize) rc = NIX_AF_ERR_AQ_ENQUEUE; break; case NIX_AQ_CTYPE_SQ: if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize) rc = NIX_AF_ERR_AQ_ENQUEUE; break; case NIX_AQ_CTYPE_CQ: if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize) rc = NIX_AF_ERR_AQ_ENQUEUE; break; case NIX_AQ_CTYPE_RSS: /* Check if RSS is enabled and qidx is within range */ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf)); if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx || (req->qidx >= (256UL << (cfg & 0xF)))) rc = NIX_AF_ERR_AQ_ENQUEUE; break; case NIX_AQ_CTYPE_MCE: cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG); /* Check if index exceeds MCE list length */ if (!nix_hw->mcast.mce_ctx || (req->qidx >= (256UL << (cfg & 0xF)))) rc = NIX_AF_ERR_AQ_ENQUEUE; /* Adding multicast lists for requests from PF/VFs is not * yet supported, so ignore this. */ if (rsp) rc = NIX_AF_ERR_AQ_ENQUEUE; break; case NIX_AQ_CTYPE_BANDPROF: if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req, nix_hw, pcifunc)) rc = NIX_AF_ERR_INVALID_BANDPROF; break; default: rc = NIX_AF_ERR_AQ_ENQUEUE; } if (rc) return rc; nix_get_aq_req_smq(rvu, req, &smq, &smq_mask); /* Check if SQ pointed SMQ belongs to this PF/VF or not */ if (req->ctype == NIX_AQ_CTYPE_SQ && ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) || (req->op == NIX_AQ_INSTOP_WRITE && req->sq_mask.ena && req->sq.ena && smq_mask))) { if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ, pcifunc, smq)) return NIX_AF_ERR_AQ_ENQUEUE; } memset(&inst, 0, sizeof(struct nix_aq_inst_s)); inst.lf = nixlf; inst.cindex = req->qidx; inst.ctype = req->ctype; inst.op = req->op; /* Currently we are not supporting enqueuing multiple instructions, * so always choose first entry in result memory. */ inst.res_addr = (u64)aq->res->iova; /* Hardware uses same aq->res->base for updating result of * previous instruction hence wait here till it is done. 
*/ spin_lock(&aq->lock); /* Clean result + context memory */ memset(aq->res->base, 0, aq->res->entry_sz); /* Context needs to be written at RES_ADDR + 128 */ ctx = aq->res->base + 128; /* Mask needs to be written at RES_ADDR + 256 */ mask = aq->res->base + 256; switch (req->op) { case NIX_AQ_INSTOP_WRITE: if (req->ctype == NIX_AQ_CTYPE_RQ) memcpy(mask, &req->rq_mask, sizeof(struct nix_rq_ctx_s)); else if (req->ctype == NIX_AQ_CTYPE_SQ) memcpy(mask, &req->sq_mask, sizeof(struct nix_sq_ctx_s)); else if (req->ctype == NIX_AQ_CTYPE_CQ) memcpy(mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s)); else if (req->ctype == NIX_AQ_CTYPE_RSS) memcpy(mask, &req->rss_mask, sizeof(struct nix_rsse_s)); else if (req->ctype == NIX_AQ_CTYPE_MCE) memcpy(mask, &req->mce_mask, sizeof(struct nix_rx_mce_s)); else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) memcpy(mask, &req->prof_mask, sizeof(struct nix_bandprof_s)); fallthrough; case NIX_AQ_INSTOP_INIT: if (req->ctype == NIX_AQ_CTYPE_RQ) memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); else if (req->ctype == NIX_AQ_CTYPE_SQ) memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s)); else if (req->ctype == NIX_AQ_CTYPE_CQ) memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s)); else if (req->ctype == NIX_AQ_CTYPE_RSS) memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s)); else if (req->ctype == NIX_AQ_CTYPE_MCE) memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s)); else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s)); break; case NIX_AQ_INSTOP_NOP: case NIX_AQ_INSTOP_READ: case NIX_AQ_INSTOP_LOCK: case NIX_AQ_INSTOP_UNLOCK: break; default: rc = NIX_AF_ERR_AQ_ENQUEUE; spin_unlock(&aq->lock); return rc; } /* Submit the instruction to AQ */ rc = nix_aq_enqueue_wait(rvu, block, &inst); if (rc) { spin_unlock(&aq->lock); return rc; } /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */ if (req->op == NIX_AQ_INSTOP_INIT) { if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena) __set_bit(req->qidx, pfvf->rq_bmap); if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena) __set_bit(req->qidx, pfvf->sq_bmap); if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena) __set_bit(req->qidx, pfvf->cq_bmap); } if (req->op == NIX_AQ_INSTOP_WRITE) { if (req->ctype == NIX_AQ_CTYPE_RQ) { ena = (req->rq.ena & req->rq_mask.ena) | (test_bit(req->qidx, pfvf->rq_bmap) & ~req->rq_mask.ena); if (ena) __set_bit(req->qidx, pfvf->rq_bmap); else __clear_bit(req->qidx, pfvf->rq_bmap); } if (req->ctype == NIX_AQ_CTYPE_SQ) { ena = (req->rq.ena & req->sq_mask.ena) | (test_bit(req->qidx, pfvf->sq_bmap) & ~req->sq_mask.ena); if (ena) __set_bit(req->qidx, pfvf->sq_bmap); else __clear_bit(req->qidx, pfvf->sq_bmap); } if (req->ctype == NIX_AQ_CTYPE_CQ) { ena = (req->rq.ena & req->cq_mask.ena) | (test_bit(req->qidx, pfvf->cq_bmap) & ~req->cq_mask.ena); if (ena) __set_bit(req->qidx, pfvf->cq_bmap); else __clear_bit(req->qidx, pfvf->cq_bmap); } } if (rsp) { /* Copy read context into mailbox */ if (req->op == NIX_AQ_INSTOP_READ) { if (req->ctype == NIX_AQ_CTYPE_RQ) memcpy(&rsp->rq, ctx, sizeof(struct nix_rq_ctx_s)); else if (req->ctype == NIX_AQ_CTYPE_SQ) memcpy(&rsp->sq, ctx, sizeof(struct nix_sq_ctx_s)); else if (req->ctype == NIX_AQ_CTYPE_CQ) memcpy(&rsp->cq, ctx, sizeof(struct nix_cq_ctx_s)); else if (req->ctype == NIX_AQ_CTYPE_RSS) memcpy(&rsp->rss, ctx, sizeof(struct nix_rsse_s)); else if (req->ctype == NIX_AQ_CTYPE_MCE) memcpy(&rsp->mce, ctx, sizeof(struct nix_rx_mce_s)); else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) memcpy(&rsp->prof, ctx, sizeof(struct nix_bandprof_s)); } } 
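/* Editorial sketch (not driver code): the WRITE-path bitmap updates above are
 * a masked read-modify-write: a queue's cached enable state only changes when
 * the corresponding *_mask.ena bit is set, otherwise the previously cached
 * bitmap value is kept. For a single bit that logic is equivalent to the
 * helper below (hypothetical name, shown only to make the expression easier
 * to read):
 *
 *	static inline bool masked_bit_update(bool cur, bool val, bool mask)
 *	{
 *		return mask ? val : cur; // same as (val & mask) | (cur & ~mask)
 *	}
 *
 * e.g. cur=1, mask=0 keeps the queue marked enabled; cur=1, val=0, mask=1
 * clears it, which is exactly the case where __clear_bit() is called above.
 */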
spin_unlock(&aq->lock); return 0; } static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw, struct nix_aq_enq_req *req, u8 ctype) { struct nix_cn10k_aq_enq_req aq_req; struct nix_cn10k_aq_enq_rsp aq_rsp; int rc, word; if (req->ctype != NIX_AQ_CTYPE_CQ) return 0; rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, req->hdr.pcifunc, ctype, req->qidx); if (rc) { dev_err(rvu->dev, "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n", __func__, nix_get_ctx_name(ctype), req->qidx, req->hdr.pcifunc); return rc; } /* Make copy of original context & mask which are required * for resubmission */ memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s)); memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s)); /* exclude fields which HW can update */ aq_req.cq_mask.cq_err = 0; aq_req.cq_mask.wrptr = 0; aq_req.cq_mask.tail = 0; aq_req.cq_mask.head = 0; aq_req.cq_mask.avg_level = 0; aq_req.cq_mask.update_time = 0; aq_req.cq_mask.substream = 0; /* Context mask (cq_mask) holds mask value of fields which * are changed in AQ WRITE operation. * for example cq.drop = 0xa; * cq_mask.drop = 0xff; * Below logic performs '&' between cq and cq_mask so that non * updated fields are masked out for request and response * comparison */ for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64); word++) { *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &= (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); *(u64 *)((u8 *)&aq_req.cq + word * 8) &= (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); } if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s))) return NIX_AF_ERR_AQ_CTX_RETRY_WRITE; return 0; } static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, struct nix_aq_enq_rsp *rsp) { struct nix_hw *nix_hw; int err, retries = 5; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); if (blkaddr < 0) return NIX_AF_ERR_AF_LF_INVALID; nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; retry: err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); /* HW errata 'AQ Modification to CQ could be discarded on heavy traffic' * As a work around perfrom CQ context read after each AQ write. If AQ * read shows AQ write is not updated perform AQ write again. 
*/ if (!err && req->op == NIX_AQ_INSTOP_WRITE) { err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ); if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) { if (retries--) goto retry; else return NIX_AF_ERR_CQ_CTX_WRITE_ERR; } } return err; } static const char *nix_get_ctx_name(int ctype) { switch (ctype) { case NIX_AQ_CTYPE_CQ: return "CQ"; case NIX_AQ_CTYPE_SQ: return "SQ"; case NIX_AQ_CTYPE_RQ: return "RQ"; case NIX_AQ_CTYPE_RSS: return "RSS"; } return ""; } static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); struct nix_aq_enq_req aq_req; unsigned long *bmap; int qidx, q_cnt = 0; int err = 0, rc; if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx) return NIX_AF_ERR_AQ_ENQUEUE; memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); aq_req.hdr.pcifunc = req->hdr.pcifunc; if (req->ctype == NIX_AQ_CTYPE_CQ) { aq_req.cq.ena = 0; aq_req.cq_mask.ena = 1; aq_req.cq.bp_ena = 0; aq_req.cq_mask.bp_ena = 1; q_cnt = pfvf->cq_ctx->qsize; bmap = pfvf->cq_bmap; } if (req->ctype == NIX_AQ_CTYPE_SQ) { aq_req.sq.ena = 0; aq_req.sq_mask.ena = 1; q_cnt = pfvf->sq_ctx->qsize; bmap = pfvf->sq_bmap; } if (req->ctype == NIX_AQ_CTYPE_RQ) { aq_req.rq.ena = 0; aq_req.rq_mask.ena = 1; q_cnt = pfvf->rq_ctx->qsize; bmap = pfvf->rq_bmap; } aq_req.ctype = req->ctype; aq_req.op = NIX_AQ_INSTOP_WRITE; for (qidx = 0; qidx < q_cnt; qidx++) { if (!test_bit(qidx, bmap)) continue; aq_req.qidx = qidx; rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL); if (rc) { err = rc; dev_err(rvu->dev, "Failed to disable %s:%d context\n", nix_get_ctx_name(req->ctype), qidx); } } return err; } #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req) { struct nix_aq_enq_req lock_ctx_req; int err; if (req->op != NIX_AQ_INSTOP_INIT) return 0; if (req->ctype == NIX_AQ_CTYPE_MCE || req->ctype == NIX_AQ_CTYPE_DYNO) return 0; memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req)); lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc; lock_ctx_req.ctype = req->ctype; lock_ctx_req.op = NIX_AQ_INSTOP_LOCK; lock_ctx_req.qidx = req->qidx; err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL); if (err) dev_err(rvu->dev, "PFUNC 0x%x: Failed to lock NIX %s:%d context\n", req->hdr.pcifunc, nix_get_ctx_name(req->ctype), req->qidx); return err; } int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, struct nix_aq_enq_req *req, struct nix_aq_enq_rsp *rsp) { int err; err = rvu_nix_aq_enq_inst(rvu, req, rsp); if (!err) err = nix_lf_hwctx_lockdown(rvu, req); return err; } #else int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, struct nix_aq_enq_req *req, struct nix_aq_enq_rsp *rsp) { return rvu_nix_aq_enq_inst(rvu, req, rsp); } #endif /* CN10K mbox handler */ int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu, struct nix_cn10k_aq_enq_req *req, struct nix_cn10k_aq_enq_rsp *rsp) { return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req, (struct nix_aq_enq_rsp *)rsp); } int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req, struct msg_rsp *rsp) { return nix_lf_hwctx_disable(rvu, req); } int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, struct nix_lf_alloc_req *req, struct nix_lf_alloc_rsp *rsp) { int nixlf, qints, hwctx_size, intf, err, rc = 0; struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; struct rvu_block *block; struct rvu_pfvf *pfvf; u64 cfg, ctx_cfg; int blkaddr; if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt) return NIX_AF_ERR_PARAM; if (req->way_mask) req->way_mask &= 0xFFFF; 
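/* Editorial sketch (not driver code): a NIX LF allocation request from a
 * PF/VF driver is expected to carry non-zero RQ/SQ/CQ counts and, when RSS
 * is wanted, a power-of-two table size with at least one group. Illustrative
 * values only; the mbox allocator name follows the style of the otx2
 * consumer drivers and is an assumption here:
 *
 *	struct nix_lf_alloc_req *req;
 *
 *	req = otx2_mbox_alloc_msg_nix_lf_alloc(mbox);
 *	req->rq_cnt = 8;
 *	req->sq_cnt = 8;
 *	req->cq_cnt = 16;
 *	req->rss_sz = 64;			// must be a power of two
 *	req->rss_grps = 1;
 *	req->npa_func = RVU_DEFAULT_PF_FUNC;	// use caller's own NPA LF
 *	req->sso_func = 0;			// no SSO binding
 *
 * The handler rejects zero queue counts (checked just above) and, below,
 * invalid NPA/SSO mappings and RSS sizes that are not a power of two.
 */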
pfvf = rvu_get_pfvf(rvu, pcifunc); blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (!pfvf->nixlf || blkaddr < 0) return NIX_AF_ERR_AF_LF_INVALID; block = &hw->block[blkaddr]; nixlf = rvu_get_lf(rvu, block, pcifunc, 0); if (nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; /* Check if requested 'NIXLF <=> NPALF' mapping is valid */ if (req->npa_func) { /* If default, use 'this' NIXLF's PFFUNC */ if (req->npa_func == RVU_DEFAULT_PF_FUNC) req->npa_func = pcifunc; if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA)) return NIX_AF_INVAL_NPA_PF_FUNC; } /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */ if (req->sso_func) { /* If default, use 'this' NIXLF's PFFUNC */ if (req->sso_func == RVU_DEFAULT_PF_FUNC) req->sso_func = pcifunc; if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO)) return NIX_AF_INVAL_SSO_PF_FUNC; } /* If RSS is being enabled, check if requested config is valid. * RSS table size should be power of two, otherwise * RSS_GRP::OFFSET + adder might go beyond that group or * won't be able to use entire table. */ if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE || !is_power_of_2(req->rss_sz))) return NIX_AF_ERR_RSS_SIZE_INVALID; if (req->rss_sz && (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS)) return NIX_AF_ERR_RSS_GRPS_INVALID; /* Reset this NIX LF */ err = rvu_lf_reset(rvu, block, nixlf); if (err) { dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", block->addr - BLKADDR_NIX0, nixlf); return NIX_AF_ERR_LF_RESET; } ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3); /* Alloc NIX RQ HW context memory and config the base */ hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF); err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size); if (err) goto free_mem; pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL); if (!pfvf->rq_bmap) goto free_mem; rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf), (u64)pfvf->rq_ctx->iova); /* Set caching and queue count in HW */ cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20; rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg); /* Alloc NIX SQ HW context memory and config the base */ hwctx_size = 1UL << (ctx_cfg & 0xF); err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size); if (err) goto free_mem; pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL); if (!pfvf->sq_bmap) goto free_mem; rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf), (u64)pfvf->sq_ctx->iova); cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20; rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg); /* Alloc NIX CQ HW context memory and config the base */ hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF); err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size); if (err) goto free_mem; pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL); if (!pfvf->cq_bmap) goto free_mem; rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf), (u64)pfvf->cq_ctx->iova); cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20; rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg); /* Initialize receive side scaling (RSS) */ hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF); err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz, req->rss_grps, hwctx_size, req->way_mask, !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER)); if (err) goto free_mem; /* Alloc memory for CQINT's HW contexts */ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); qints = (cfg >> 24) & 0xFFF; hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF); err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size); if (err) goto 
free_mem; rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf), (u64)pfvf->cq_ints_ctx->iova); rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36) | req->way_mask << 20); /* Alloc memory for QINT's HW contexts */ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); qints = (cfg >> 12) & 0xFFF; hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF); err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size); if (err) goto free_mem; rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf), (u64)pfvf->nix_qints_ctx->iova); rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36) | req->way_mask << 20); /* Setup VLANX TPID's. * Use VLAN1 for 802.1Q * and VLAN0 for 802.1AD. */ cfg = (0x8100ULL << 16) | 0x88A8ULL; rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); /* Enable LMTST for this NIX LF */ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0)); /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */ if (req->npa_func) cfg = req->npa_func; if (req->sso_func) cfg |= (u64)req->sso_func << 16; cfg |= (u64)req->xqe_sz << 33; rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg); /* Config Rx pkt length, csum checks and apad enable / disable */ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg); /* Configure pkind for TX parse config */ cfg = NPC_TX_DEF_PKIND; rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg); intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; if (is_sdp_pfvf(pcifunc)) intf = NIX_INTF_TYPE_SDP; err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp, !!(req->flags & NIX_LF_LBK_BLK_SEL)); if (err) goto free_mem; /* Disable NPC entries as NIXLF's contexts are not initialized yet */ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); /* Configure RX VTAG Type 7 (strip) for vf vlan */ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7), VTAGSIZE_T4 | VTAG_STRIP); goto exit; free_mem: nix_ctx_free(rvu, pfvf); rc = -ENOMEM; exit: /* Set macaddr of this PF/VF */ ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); /* set SQB size info */ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST); rsp->sqb_size = (cfg >> 34) & 0xFFFF; rsp->rx_chan_base = pfvf->rx_chan_base; rsp->tx_chan_base = pfvf->tx_chan_base; rsp->rx_chan_cnt = pfvf->rx_chan_cnt; rsp->tx_chan_cnt = pfvf->tx_chan_cnt; rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4; rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6; /* Get HW supported stat count */ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); rsp->lf_rx_stats = ((cfg >> 32) & 0xFF); rsp->lf_tx_stats = ((cfg >> 24) & 0xFF); /* Get count of CQ IRQs and error IRQs supported per LF */ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); rsp->qints = ((cfg >> 12) & 0xFFF); rsp->cints = ((cfg >> 24) & 0xFFF); rsp->cgx_links = hw->cgx_links; rsp->lbk_links = hw->lbk_links; rsp->sdp_links = hw->sdp_links; return rc; } int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req, struct msg_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; struct rvu_block *block; int blkaddr, nixlf, err; struct rvu_pfvf *pfvf; pfvf = rvu_get_pfvf(rvu, pcifunc); blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (!pfvf->nixlf || blkaddr < 0) return NIX_AF_ERR_AF_LF_INVALID; block = &hw->block[blkaddr]; nixlf = rvu_get_lf(rvu, block, pcifunc, 0); if (nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; if (req->flags & NIX_LF_DISABLE_FLOWS) rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); else rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); /* Free any tx 
vtag def entries used by this NIX LF */ if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG)) nix_free_tx_vtag_entries(rvu, pcifunc); nix_interface_deinit(rvu, pcifunc, nixlf); /* Reset this NIX LF */ err = rvu_lf_reset(rvu, block, nixlf); if (err) { dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n", block->addr - BLKADDR_NIX0, nixlf); return NIX_AF_ERR_LF_RESET; } nix_ctx_free(rvu, pfvf); return 0; } int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, struct nix_mark_format_cfg *req, struct nix_mark_format_cfg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; struct nix_hw *nix_hw; struct rvu_pfvf *pfvf; int blkaddr, rc; u32 cfg; pfvf = rvu_get_pfvf(rvu, pcifunc); blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (!pfvf->nixlf || blkaddr < 0) return NIX_AF_ERR_AF_LF_INVALID; nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; cfg = (((u32)req->offset & 0x7) << 16) | (((u32)req->y_mask & 0xF) << 12) | (((u32)req->y_val & 0xF) << 8) | (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF); rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg); if (rc < 0) { dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)", rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); return NIX_AF_ERR_MARK_CFG_FAIL; } rsp->mark_format_idx = rc; return 0; } /* Handle shaper update specially for few revisions */ static bool handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf, int lvl, u64 reg, u64 regval) { u64 regbase, oldval, sw_xoff = 0; u64 dbgval, md_debug0 = 0; unsigned long poll_tmo; bool rate_reg = 0; u32 schq; regbase = reg & 0xFFFF; schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); /* Check for rate register */ switch (lvl) { case NIX_TXSCH_LVL_TL1: md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq); sw_xoff = NIX_AF_TL1X_SW_XOFF(schq); rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0)); break; case NIX_TXSCH_LVL_TL2: md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq); sw_xoff = NIX_AF_TL2X_SW_XOFF(schq); rate_reg = (regbase == NIX_AF_TL2X_CIR(0) || regbase == NIX_AF_TL2X_PIR(0)); break; case NIX_TXSCH_LVL_TL3: md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq); sw_xoff = NIX_AF_TL3X_SW_XOFF(schq); rate_reg = (regbase == NIX_AF_TL3X_CIR(0) || regbase == NIX_AF_TL3X_PIR(0)); break; case NIX_TXSCH_LVL_TL4: md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq); sw_xoff = NIX_AF_TL4X_SW_XOFF(schq); rate_reg = (regbase == NIX_AF_TL4X_CIR(0) || regbase == NIX_AF_TL4X_PIR(0)); break; case NIX_TXSCH_LVL_MDQ: sw_xoff = NIX_AF_MDQX_SW_XOFF(schq); rate_reg = (regbase == NIX_AF_MDQX_CIR(0) || regbase == NIX_AF_MDQX_PIR(0)); break; } if (!rate_reg) return false; /* Nothing special to do when state is not toggled */ oldval = rvu_read64(rvu, blkaddr, reg); if ((oldval & 0x1) == (regval & 0x1)) { rvu_write64(rvu, blkaddr, reg, regval); return true; } /* PIR/CIR disable */ if (!(regval & 0x1)) { rvu_write64(rvu, blkaddr, sw_xoff, 1); rvu_write64(rvu, blkaddr, reg, 0); udelay(4); rvu_write64(rvu, blkaddr, sw_xoff, 0); return true; } /* PIR/CIR enable */ rvu_write64(rvu, blkaddr, sw_xoff, 1); if (md_debug0) { poll_tmo = jiffies + usecs_to_jiffies(10000); /* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */ do { if (time_after(jiffies, poll_tmo)) { dev_err(rvu->dev, "NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n", nixlf, schq, lvl); goto exit; } usleep_range(1, 5); dbgval = rvu_read64(rvu, blkaddr, md_debug0); } while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48))); } rvu_write64(rvu, blkaddr, reg, regval); exit: rvu_write64(rvu, blkaddr, sw_xoff, 0); return true; } static void nix_reset_tx_schedule(struct rvu *rvu, 
int blkaddr, int lvl, int schq) { u64 tlx_parent = 0, tlx_schedule = 0; switch (lvl) { case NIX_TXSCH_LVL_TL2: tlx_parent = NIX_AF_TL2X_PARENT(schq); tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq); break; case NIX_TXSCH_LVL_TL3: tlx_parent = NIX_AF_TL3X_PARENT(schq); tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq); break; case NIX_TXSCH_LVL_TL4: tlx_parent = NIX_AF_TL4X_PARENT(schq); tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq); break; case NIX_TXSCH_LVL_MDQ: /* no need to reset SMQ_CFG as HW clears this CSR * on SMQ flush */ tlx_parent = NIX_AF_MDQX_PARENT(schq); tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq); break; default: return; } if (tlx_parent) rvu_write64(rvu, blkaddr, tlx_parent, 0x0); if (tlx_schedule) rvu_write64(rvu, blkaddr, tlx_schedule, 0x0); } /* Disable shaping of pkts by a scheduler queue * at a given scheduler level. */ static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, int nixlf, int lvl, int schq) { struct rvu_hwinfo *hw = rvu->hw; u64 cir_reg = 0, pir_reg = 0; u64 cfg; switch (lvl) { case NIX_TXSCH_LVL_TL1: cir_reg = NIX_AF_TL1X_CIR(schq); pir_reg = 0; /* PIR not available at TL1 */ break; case NIX_TXSCH_LVL_TL2: cir_reg = NIX_AF_TL2X_CIR(schq); pir_reg = NIX_AF_TL2X_PIR(schq); break; case NIX_TXSCH_LVL_TL3: cir_reg = NIX_AF_TL3X_CIR(schq); pir_reg = NIX_AF_TL3X_PIR(schq); break; case NIX_TXSCH_LVL_TL4: cir_reg = NIX_AF_TL4X_CIR(schq); pir_reg = NIX_AF_TL4X_PIR(schq); break; case NIX_TXSCH_LVL_MDQ: cir_reg = NIX_AF_MDQX_CIR(schq); pir_reg = NIX_AF_MDQX_PIR(schq); break; } /* Shaper state toggle needs wait/poll */ if (hw->cap.nix_shaper_toggle_wait) { if (cir_reg) handle_txschq_shaper_update(rvu, blkaddr, nixlf, lvl, cir_reg, 0); if (pir_reg) handle_txschq_shaper_update(rvu, blkaddr, nixlf, lvl, pir_reg, 0); return; } if (!cir_reg) return; cfg = rvu_read64(rvu, blkaddr, cir_reg); rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0)); if (!pir_reg) return; cfg = rvu_read64(rvu, blkaddr, pir_reg); rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0)); } static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, int lvl, int schq) { struct rvu_hwinfo *hw = rvu->hw; int link_level; int link; if (lvl >= hw->cap.nix_tx_aggr_lvl) return; /* Reset TL4's SDP link config */ if (lvl == NIX_TXSCH_LVL_TL4) rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00); link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 
NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; if (lvl != link_level) return; /* Reset TL2's CGX or LBK link config */ for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) rvu_write64(rvu, blkaddr, NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); } static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr, int lvl, int schq) { struct rvu_hwinfo *hw = rvu->hw; u64 reg; /* Skip this if shaping is not supported */ if (!hw->cap.nix_shaping) return; /* Clear level specific SW_XOFF */ switch (lvl) { case NIX_TXSCH_LVL_TL1: reg = NIX_AF_TL1X_SW_XOFF(schq); break; case NIX_TXSCH_LVL_TL2: reg = NIX_AF_TL2X_SW_XOFF(schq); break; case NIX_TXSCH_LVL_TL3: reg = NIX_AF_TL3X_SW_XOFF(schq); break; case NIX_TXSCH_LVL_TL4: reg = NIX_AF_TL4X_SW_XOFF(schq); break; case NIX_TXSCH_LVL_MDQ: reg = NIX_AF_MDQX_SW_XOFF(schq); break; default: return; } rvu_write64(rvu, blkaddr, reg, 0x0); } static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) { struct rvu_hwinfo *hw = rvu->hw; int pf = rvu_get_pf(pcifunc); u8 cgx_id = 0, lmac_id = 0; if (is_afvf(pcifunc)) {/* LBK links */ return hw->cgx_links; } else if (is_pf_cgxmapped(rvu, pf)) { rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); return (cgx_id * hw->lmac_per_cgx) + lmac_id; } /* SDP link */ return hw->cgx_links + hw->lbk_links; } static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc, int link, int *start, int *end) { struct rvu_hwinfo *hw = rvu->hw; int pf = rvu_get_pf(pcifunc); if (is_afvf(pcifunc)) { /* LBK links */ *start = hw->cap.nix_txsch_per_cgx_lmac * link; *end = *start + hw->cap.nix_txsch_per_lbk_lmac; } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */ *start = hw->cap.nix_txsch_per_cgx_lmac * link; *end = *start + hw->cap.nix_txsch_per_cgx_lmac; } else { /* SDP link */ *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) + (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links); *end = *start + hw->cap.nix_txsch_per_sdp_lmac; } } static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc, struct nix_hw *nix_hw, struct nix_txsch_alloc_req *req) { struct rvu_hwinfo *hw = rvu->hw; int schq, req_schq, free_cnt; struct nix_txsch *txsch; int link, start, end; txsch = &nix_hw->txsch[lvl]; req_schq = req->schq_contig[lvl] + req->schq[lvl]; if (!req_schq) return 0; link = nix_get_tx_link(rvu, pcifunc); /* For traffic aggregating scheduler level, one queue is enough */ if (lvl >= hw->cap.nix_tx_aggr_lvl) { if (req_schq != 1) return NIX_AF_ERR_TLX_ALLOC_FAIL; return 0; } /* Get free SCHQ count and check if request can be accomodated */ if (hw->cap.nix_fixed_txschq_mapping) { nix_get_txschq_range(rvu, pcifunc, link, &start, &end); schq = start + (pcifunc & RVU_PFVF_FUNC_MASK); if (end <= txsch->schq.max && schq < end && !test_bit(schq, txsch->schq.bmap)) free_cnt = 1; else free_cnt = 0; } else { free_cnt = rvu_rsrc_free_count(&txsch->schq); } if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC || req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC) return NIX_AF_ERR_TLX_ALLOC_FAIL; /* If contiguous queues are needed, check for availability */ if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] && !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl])) return NIX_AF_ERR_TLX_ALLOC_FAIL; return 0; } static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch, struct nix_txsch_alloc_rsp *rsp, int lvl, int start, int end) { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = rsp->hdr.pcifunc; int idx, schq; /* For traffic aggregating levels, queue alloc is based * on transmit link to which PF_FUNC is 
mapped to. */ if (lvl >= hw->cap.nix_tx_aggr_lvl) { /* A single TL queue is allocated */ if (rsp->schq_contig[lvl]) { rsp->schq_contig[lvl] = 1; rsp->schq_contig_list[lvl][0] = start; } /* Both contig and non-contig reqs doesn't make sense here */ if (rsp->schq_contig[lvl]) rsp->schq[lvl] = 0; if (rsp->schq[lvl]) { rsp->schq[lvl] = 1; rsp->schq_list[lvl][0] = start; } return; } /* Adjust the queue request count if HW supports * only one queue per level configuration. */ if (hw->cap.nix_fixed_txschq_mapping) { idx = pcifunc & RVU_PFVF_FUNC_MASK; schq = start + idx; if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) { rsp->schq_contig[lvl] = 0; rsp->schq[lvl] = 0; return; } if (rsp->schq_contig[lvl]) { rsp->schq_contig[lvl] = 1; set_bit(schq, txsch->schq.bmap); rsp->schq_contig_list[lvl][0] = schq; rsp->schq[lvl] = 0; } else if (rsp->schq[lvl]) { rsp->schq[lvl] = 1; set_bit(schq, txsch->schq.bmap); rsp->schq_list[lvl][0] = schq; } return; } /* Allocate contiguous queue indices requesty first */ if (rsp->schq_contig[lvl]) { schq = bitmap_find_next_zero_area(txsch->schq.bmap, txsch->schq.max, start, rsp->schq_contig[lvl], 0); if (schq >= end) rsp->schq_contig[lvl] = 0; for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) { set_bit(schq, txsch->schq.bmap); rsp->schq_contig_list[lvl][idx] = schq; schq++; } } /* Allocate non-contiguous queue indices */ if (rsp->schq[lvl]) { idx = 0; for (schq = start; schq < end; schq++) { if (!test_bit(schq, txsch->schq.bmap)) { set_bit(schq, txsch->schq.bmap); rsp->schq_list[lvl][idx++] = schq; } if (idx == rsp->schq[lvl]) break; } /* Update how many were allocated */ rsp->schq[lvl] = idx; } } int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, struct nix_txsch_alloc_req *req, struct nix_txsch_alloc_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; int link, blkaddr, rc = 0; int lvl, idx, start, end; struct nix_txsch *txsch; struct nix_hw *nix_hw; u32 *pfvf_map; int nixlf; u16 schq; rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); if (rc) return rc; nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; mutex_lock(&rvu->rsrc_lock); /* Check if request is valid as per HW capabilities * and can be accomodated. 
*/ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req); if (rc) goto err; } /* Allocate requested Tx scheduler queues */ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { txsch = &nix_hw->txsch[lvl]; pfvf_map = txsch->pfvf_map; if (!req->schq[lvl] && !req->schq_contig[lvl]) continue; rsp->schq[lvl] = req->schq[lvl]; rsp->schq_contig[lvl] = req->schq_contig[lvl]; link = nix_get_tx_link(rvu, pcifunc); if (lvl >= hw->cap.nix_tx_aggr_lvl) { start = link; end = link; } else if (hw->cap.nix_fixed_txschq_mapping) { nix_get_txschq_range(rvu, pcifunc, link, &start, &end); } else { start = 0; end = txsch->schq.max; } nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end); /* Reset queue config */ for (idx = 0; idx < req->schq_contig[lvl]; idx++) { schq = rsp->schq_contig_list[lvl][idx]; if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)) pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); } for (idx = 0; idx < req->schq[lvl]; idx++) { schq = rsp->schq_list[lvl][idx]; if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)) pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); } } rsp->aggr_level = hw->cap.nix_tx_aggr_lvl; rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO; rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; goto exit; err: rc = NIX_AF_ERR_TLX_ALLOC_FAIL; exit: mutex_unlock(&rvu->rsrc_lock); return rc; } static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq, struct nix_smq_flush_ctx *smq_flush_ctx) { struct nix_smq_tree_ctx *smq_tree_ctx; u64 parent_off, regval; u16 schq; int lvl; smq_flush_ctx->smq = smq; schq = smq; for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) { smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl]; if (lvl == NIX_TXSCH_LVL_TL1) { smq_flush_ctx->tl1_schq = schq; smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq); smq_tree_ctx->pir_off = 0; smq_tree_ctx->pir_val = 0; parent_off = 0; } else if (lvl == NIX_TXSCH_LVL_TL2) { smq_flush_ctx->tl2_schq = schq; smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq); smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq); parent_off = NIX_AF_TL2X_PARENT(schq); } else if (lvl == NIX_TXSCH_LVL_TL3) { smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq); smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq); parent_off = NIX_AF_TL3X_PARENT(schq); } else if (lvl == NIX_TXSCH_LVL_TL4) { smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq); smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq); parent_off = NIX_AF_TL4X_PARENT(schq); } else if (lvl == NIX_TXSCH_LVL_MDQ) { smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq); smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq); parent_off = NIX_AF_MDQX_PARENT(schq); } /* save cir/pir register values */ smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off); if (smq_tree_ctx->pir_off) smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off); /* get parent txsch node */ if (parent_off) { regval = rvu_read64(rvu, blkaddr, parent_off); schq = (regval >> 16) & 0x1FF; } } } static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr, struct nix_smq_flush_ctx *smq_flush_ctx, bool enable) { struct nix_txsch *txsch; struct nix_hw *nix_hw; u64 regoff; int tl2; nix_hw = get_nix_hw(rvu->hw, 
blkaddr); if (!nix_hw) return; /* loop through all TL2s with matching PF_FUNC */ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2]; for (tl2 = 0; tl2 < txsch->schq.max; tl2++) { /* skip the smq(flush) TL2 */ if (tl2 == smq_flush_ctx->tl2_schq) continue; /* skip unused TL2s */ if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE) continue; /* skip if PF_FUNC doesn't match */ if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) != (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] & ~RVU_PFVF_FUNC_MASK))) continue; /* enable/disable XOFF */ regoff = NIX_AF_TL2X_SW_XOFF(tl2); if (enable) rvu_write64(rvu, blkaddr, regoff, 0x1); else rvu_write64(rvu, blkaddr, regoff, 0x0); } } static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr, struct nix_smq_flush_ctx *smq_flush_ctx, bool enable) { u64 cir_off, pir_off, cir_val, pir_val; struct nix_smq_tree_ctx *smq_tree_ctx; int lvl; for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) { smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl]; cir_off = smq_tree_ctx->cir_off; cir_val = smq_tree_ctx->cir_val; pir_off = smq_tree_ctx->pir_off; pir_val = smq_tree_ctx->pir_val; if (enable) { rvu_write64(rvu, blkaddr, cir_off, cir_val); if (lvl != NIX_TXSCH_LVL_TL1) rvu_write64(rvu, blkaddr, pir_off, pir_val); } else { rvu_write64(rvu, blkaddr, cir_off, 0x0); if (lvl != NIX_TXSCH_LVL_TL1) rvu_write64(rvu, blkaddr, pir_off, 0x0); } } } static int nix_smq_flush(struct rvu *rvu, int blkaddr, int smq, u16 pcifunc, int nixlf) { struct nix_smq_flush_ctx *smq_flush_ctx; int pf = rvu_get_pf(pcifunc); u8 cgx_id = 0, lmac_id = 0; int err, restore_tx_en = 0; u64 cfg; if (!is_rvu_otx2(rvu)) { /* Skip SMQ flush if pkt count is zero */ cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq)); if (!cfg) return 0; } /* enable cgx tx if disabled */ if (is_pf_cgxmapped(rvu, pf)) { rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, true); } /* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */ smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL); if (!smq_flush_ctx) return -ENOMEM; nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx); nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true); nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false); cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); /* Do SMQ flush and set enqueue xoff */ cfg |= BIT_ULL(50) | BIT_ULL(49); rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); /* Disable backpressure from physical link, * otherwise SMQ flush may stall. 
*/ rvu_cgx_enadis_rx_bp(rvu, pf, false); /* Wait for flush to complete */ err = rvu_poll_reg(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true); if (err) dev_info(rvu->dev, "NIXLF%d: SMQ%d flush failed, txlink might be busy\n", nixlf, smq); /* clear XOFF on TL2s */ nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true); nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false); kfree(smq_flush_ctx); rvu_cgx_enadis_rx_bp(rvu, pf, true); /* restore cgx tx state */ if (restore_tx_en) rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); return err; } static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) { int blkaddr, nixlf, lvl, schq, err; struct rvu_hwinfo *hw = rvu->hw; struct nix_txsch *txsch; struct nix_hw *nix_hw; u16 map_func; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) return NIX_AF_ERR_AF_LF_INVALID; nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); if (nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; /* Disable TL2/3 queue links and all XOFF's before SMQ flush*/ mutex_lock(&rvu->rsrc_lock); for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) { txsch = &nix_hw->txsch[lvl]; if (lvl >= hw->cap.nix_tx_aggr_lvl) continue; for (schq = 0; schq < txsch->schq.max; schq++) { if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) continue; nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); } } nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1, nix_get_tx_link(rvu, pcifunc)); /* On PF cleanup, clear cfg done flag as * PF would have changed default config. */ if (!(pcifunc & RVU_PFVF_FUNC_MASK)) { txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1]; schq = nix_get_tx_link(rvu, pcifunc); /* Do not clear pcifunc in txsch->pfvf_map[schq] because * VF might be using this TL1 queue */ map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0); } /* Flush SMQs */ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; for (schq = 0; schq < txsch->schq.max; schq++) { if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) continue; nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); } /* Now free scheduler queues to free pool */ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { /* TLs above aggregation level are shared across all PF * and it's VFs, hence skip freeing them. 
*/ if (lvl >= hw->cap.nix_tx_aggr_lvl) continue; txsch = &nix_hw->txsch[lvl]; for (schq = 0; schq < txsch->schq.max; schq++) { if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) continue; nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); rvu_free_rsrc(&txsch->schq, schq); txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); } } mutex_unlock(&rvu->rsrc_lock); /* Sync cached info for this LF in NDC-TX to LLC/DRAM */ rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf); err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true); if (err) dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf); return 0; } static int nix_txschq_free_one(struct rvu *rvu, struct nix_txsch_free_req *req) { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; int lvl, schq, nixlf, blkaddr; struct nix_txsch *txsch; struct nix_hw *nix_hw; u32 *pfvf_map; int rc; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) return NIX_AF_ERR_AF_LF_INVALID; nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); if (nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; lvl = req->schq_lvl; schq = req->schq; txsch = &nix_hw->txsch[lvl]; if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max) return 0; pfvf_map = txsch->pfvf_map; mutex_lock(&rvu->rsrc_lock); if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) { rc = NIX_AF_ERR_TLX_INVALID; goto err; } /* Clear SW_XOFF of this resource only. * For SMQ level, all path XOFF's * need to be made clear by user */ nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); /* Flush if it is a SMQ. Onus of disabling * TL2/3 queue links before SMQ flush is on user */ if (lvl == NIX_TXSCH_LVL_SMQ && nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) { rc = NIX_AF_SMQ_FLUSH_FAILED; goto err; } nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); /* Free the resource */ rvu_free_rsrc(&txsch->schq, schq); txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); mutex_unlock(&rvu->rsrc_lock); return 0; err: mutex_unlock(&rvu->rsrc_lock); return rc; } int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, struct nix_txsch_free_req *req, struct msg_rsp *rsp) { if (req->flags & TXSCHQ_FREE_ALL) return nix_txschq_free(rvu, req->hdr.pcifunc); else return nix_txschq_free_one(rvu, req); } static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, int lvl, u64 reg, u64 regval) { u64 regbase = reg & 0xFFFF; u16 schq, parent; if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg)) return false; schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); /* Check if this schq belongs to this PF/VF or not */ if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq)) return false; parent = (regval >> 16) & 0x1FF; /* Validate MDQ's TL4 parent */ if (regbase == NIX_AF_MDQX_PARENT(0) && !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent)) return false; /* Validate TL4's TL3 parent */ if (regbase == NIX_AF_TL4X_PARENT(0) && !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent)) return false; /* Validate TL3's TL2 parent */ if (regbase == NIX_AF_TL3X_PARENT(0) && !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent)) return false; /* Validate TL2's TL1 parent */ if (regbase == NIX_AF_TL2X_PARENT(0) && !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent)) return false; return true; } static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, 
int lvl, u64 reg) { u64 regbase; if (hw->cap.nix_shaping) return true; /* If shaping and coloring is not supported, then * *_CIR and *_PIR registers should not be configured. */ regbase = reg & 0xFFFF; switch (lvl) { case NIX_TXSCH_LVL_TL1: if (regbase == NIX_AF_TL1X_CIR(0)) return false; break; case NIX_TXSCH_LVL_TL2: if (regbase == NIX_AF_TL2X_CIR(0) || regbase == NIX_AF_TL2X_PIR(0)) return false; break; case NIX_TXSCH_LVL_TL3: if (regbase == NIX_AF_TL3X_CIR(0) || regbase == NIX_AF_TL3X_PIR(0)) return false; break; case NIX_TXSCH_LVL_TL4: if (regbase == NIX_AF_TL4X_CIR(0) || regbase == NIX_AF_TL4X_PIR(0)) return false; break; case NIX_TXSCH_LVL_MDQ: if (regbase == NIX_AF_MDQX_CIR(0) || regbase == NIX_AF_MDQX_PIR(0)) return false; break; } return true; } static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, u16 pcifunc, int blkaddr) { u32 *pfvf_map; int schq; schq = nix_get_tx_link(rvu, pcifunc); pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map; /* Skip if PF has already done the config */ if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE) return; rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq), (TXSCH_TL1_DFLT_RR_PRIO << 1)); /* On OcteonTx2 the config was in bytes and newer silcons * it's changed to weight. */ if (!rvu->hw->cap.nix_common_dwrr_mtu) rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), TXSCH_TL1_DFLT_RR_QTM); else rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), CN10K_MAX_DWRR_WEIGHT); rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00); pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); } /* Register offset - [15:0] * Scheduler Queue number - [25:16] */ #define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0) static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr, struct nix_txschq_config *req, struct nix_txschq_config *rsp) { u16 pcifunc = req->hdr.pcifunc; int idx, schq; u64 reg; for (idx = 0; idx < req->num_regs; idx++) { reg = req->reg[idx]; reg &= NIX_TX_SCHQ_MASK; schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) || !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq)) return NIX_AF_INVAL_TXSCHQ_CFG; rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg); } rsp->lvl = req->lvl; rsp->num_regs = req->num_regs; return 0; } void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc, struct nix_txsch *txsch, bool enable) { struct rvu_hwinfo *hw = rvu->hw; int lbk_link_start, lbk_links; u8 pf = rvu_get_pf(pcifunc); int schq; u64 cfg; if (!is_pf_cgxmapped(rvu, pf)) return; cfg = enable ? 
(BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0; lbk_link_start = hw->cgx_links; for (schq = 0; schq < txsch->schq.max; schq++) { if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) continue; /* Enable all LBK links with channel 63 by default so that * packets can be sent to LBK with a NPC TX MCAM rule */ lbk_links = hw->lbk_links; while (lbk_links--) rvu_write64(rvu, blkaddr, NIX_AF_TL3_TL2X_LINKX_CFG(schq, lbk_link_start + lbk_links), cfg); } } int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, struct nix_txschq_config *req, struct nix_txschq_config *rsp) { u64 reg, val, regval, schq_regbase, val_mask; struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; struct nix_txsch *txsch; struct nix_hw *nix_hw; int blkaddr, idx, err; int nixlf, schq; u32 *pfvf_map; if (req->lvl >= NIX_TXSCH_LVL_CNT || req->num_regs > MAX_REGS_PER_MBOX_MSG) return NIX_AF_INVAL_TXSCHQ_CFG; err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); if (err) return err; nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; if (req->read) return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp); txsch = &nix_hw->txsch[req->lvl]; pfvf_map = txsch->pfvf_map; if (req->lvl >= hw->cap.nix_tx_aggr_lvl && pcifunc & RVU_PFVF_FUNC_MASK) { mutex_lock(&rvu->rsrc_lock); if (req->lvl == NIX_TXSCH_LVL_TL1) nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr); mutex_unlock(&rvu->rsrc_lock); return 0; } for (idx = 0; idx < req->num_regs; idx++) { reg = req->reg[idx]; reg &= NIX_TX_SCHQ_MASK; regval = req->regval[idx]; schq_regbase = reg & 0xFFFF; val_mask = req->regval_mask[idx]; if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr, txsch->lvl, reg, regval)) return NIX_AF_INVAL_TXSCHQ_CFG; /* Check if shaping and coloring is supported */ if (!is_txschq_shaping_valid(hw, req->lvl, reg)) continue; val = rvu_read64(rvu, blkaddr, reg); regval = (val & val_mask) | (regval & ~val_mask); /* Handle shaping state toggle specially */ if (hw->cap.nix_shaper_toggle_wait && handle_txschq_shaper_update(rvu, blkaddr, nixlf, req->lvl, reg, regval)) continue; /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ if (schq_regbase == NIX_AF_SMQX_CFG(0)) { nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); regval &= ~(0x7FULL << 24); regval |= ((u64)nixlf << 24); } /* Clear 'BP_ENA' config, if it's not allowed */ if (!hw->cap.nix_tx_link_bp) { if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) || (schq_regbase & 0xFF00) == NIX_AF_TL3_TL2X_LINKX_CFG(0, 0)) regval &= ~BIT_ULL(13); } /* Mark config as done for TL1 by PF */ if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) && schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) { schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); mutex_lock(&rvu->rsrc_lock); pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); mutex_unlock(&rvu->rsrc_lock); } /* SMQ flush is special hence split register writes such * that flush first and write rest of the bits later. 
*/ if (schq_regbase == NIX_AF_SMQX_CFG(0) && (regval & BIT_ULL(49))) { schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); regval &= ~BIT_ULL(49); } rvu_write64(rvu, blkaddr, reg, regval); } return 0; } static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, struct nix_vtag_config *req) { u64 regval = req->vtag_size; if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 || req->vtag_size > VTAGSIZE_T8) return -EINVAL; /* RX VTAG Type 7 reserved for vf vlan */ if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7) return NIX_AF_ERR_RX_VTAG_INUSE; if (req->rx.capture_vtag) regval |= BIT_ULL(5); if (req->rx.strip_vtag) regval |= BIT_ULL(4); rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval); return 0; } static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr, u16 pcifunc, int index) { struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); struct nix_txvlan *vlan; if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; vlan = &nix_hw->txvlan; if (vlan->entry2pfvf_map[index] != pcifunc) return NIX_AF_ERR_PARAM; rvu_write64(rvu, blkaddr, NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull); rvu_write64(rvu, blkaddr, NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull); vlan->entry2pfvf_map[index] = 0; rvu_free_rsrc(&vlan->rsrc, index); return 0; } static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc) { struct nix_txvlan *vlan; struct nix_hw *nix_hw; int index, blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) return; nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) return; vlan = &nix_hw->txvlan; mutex_lock(&vlan->rsrc_lock); /* Scan all the entries and free the ones mapped to 'pcifunc' */ for (index = 0; index < vlan->rsrc.max; index++) { if (vlan->entry2pfvf_map[index] == pcifunc) nix_tx_vtag_free(rvu, blkaddr, pcifunc, index); } mutex_unlock(&vlan->rsrc_lock); } static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr, u64 vtag, u8 size) { struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); struct nix_txvlan *vlan; u64 regval; int index; if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; vlan = &nix_hw->txvlan; mutex_lock(&vlan->rsrc_lock); index = rvu_alloc_rsrc(&vlan->rsrc); if (index < 0) { mutex_unlock(&vlan->rsrc_lock); return index; } mutex_unlock(&vlan->rsrc_lock); regval = size ? 
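/* NIX_AF_TX_VTAG_DEFX_DATA holds the tag to insert; for a 4-byte tag
 * (vtag_size == 0) the value appears to go in the upper 32 bits,
 * while an 8-byte tag is written as-is.
 */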
vtag : vtag << 32; rvu_write64(rvu, blkaddr, NIX_AF_TX_VTAG_DEFX_DATA(index), regval); rvu_write64(rvu, blkaddr, NIX_AF_TX_VTAG_DEFX_CTL(index), size); return index; } static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr, struct nix_vtag_config *req) { struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); u16 pcifunc = req->hdr.pcifunc; int idx0 = req->tx.vtag0_idx; int idx1 = req->tx.vtag1_idx; struct nix_txvlan *vlan; int err = 0; if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; vlan = &nix_hw->txvlan; if (req->tx.free_vtag0 && req->tx.free_vtag1) if (vlan->entry2pfvf_map[idx0] != pcifunc || vlan->entry2pfvf_map[idx1] != pcifunc) return NIX_AF_ERR_PARAM; mutex_lock(&vlan->rsrc_lock); if (req->tx.free_vtag0) { err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0); if (err) goto exit; } if (req->tx.free_vtag1) err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1); exit: mutex_unlock(&vlan->rsrc_lock); return err; } static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr, struct nix_vtag_config *req, struct nix_vtag_config_rsp *rsp) { struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); struct nix_txvlan *vlan; u16 pcifunc = req->hdr.pcifunc; if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; vlan = &nix_hw->txvlan; if (req->tx.cfg_vtag0) { rsp->vtag0_idx = nix_tx_vtag_alloc(rvu, blkaddr, req->tx.vtag0, req->vtag_size); if (rsp->vtag0_idx < 0) return NIX_AF_ERR_TX_VTAG_NOSPC; vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc; } if (req->tx.cfg_vtag1) { rsp->vtag1_idx = nix_tx_vtag_alloc(rvu, blkaddr, req->tx.vtag1, req->vtag_size); if (rsp->vtag1_idx < 0) goto err_free; vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc; } return 0; err_free: if (req->tx.cfg_vtag0) nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx); return NIX_AF_ERR_TX_VTAG_NOSPC; } int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, struct nix_vtag_config *req, struct nix_vtag_config_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; int blkaddr, nixlf, err; err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); if (err) return err; if (req->cfg_type) { /* rx vtag configuration */ err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req); if (err) return NIX_AF_ERR_PARAM; } else { /* tx vtag configuration */ if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) && (req->tx.free_vtag0 || req->tx.free_vtag1)) return NIX_AF_ERR_PARAM; if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1) return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp); if (req->tx.free_vtag0 || req->tx.free_vtag1) return nix_tx_vtag_decfg(rvu, blkaddr, req); } return 0; } static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, int mce, u8 op, u16 pcifunc, int next, bool eol) { struct nix_aq_enq_req aq_req; int err; aq_req.hdr.pcifunc = 0; aq_req.ctype = NIX_AQ_CTYPE_MCE; aq_req.op = op; aq_req.qidx = mce; /* Use RSS with RSS index 0 */ aq_req.mce.op = 1; aq_req.mce.index = 0; aq_req.mce.eol = eol; aq_req.mce.pf_func = pcifunc; aq_req.mce.next = next; /* All fields valid */ *(u64 *)(&aq_req.mce_mask) = ~0ULL; err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL); if (err) { dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); return err; } return 0; } static int nix_update_mce_list_entry(struct nix_mce_list *mce_list, u16 pcifunc, bool add) { struct mce *mce, *tail = NULL; bool delete = false; /* Scan through the current list */ hlist_for_each_entry(mce, &mce_list->head, node) { /* If already exists, then delete */ if (mce->pcifunc == pcifunc && !add) { delete = true; break; } else if (mce->pcifunc == pcifunc && add) { /* 
entry already exists */ return 0; } tail = mce; } if (delete) { hlist_del(&mce->node); kfree(mce); mce_list->count--; return 0; } if (!add) return 0; /* Add a new one to the list, at the tail */ mce = kzalloc(sizeof(*mce), GFP_KERNEL); if (!mce) return -ENOMEM; mce->pcifunc = pcifunc; if (!tail) hlist_add_head(&mce->node, &mce_list->head); else hlist_add_behind(&mce->node, &tail->node); mce_list->count++; return 0; } int nix_update_mce_list(struct rvu *rvu, u16 pcifunc, struct nix_mce_list *mce_list, int mce_idx, int mcam_index, bool add) { int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr; struct npc_mcam *mcam = &rvu->hw->mcam; struct nix_mcast *mcast; struct nix_hw *nix_hw; struct mce *mce; if (!mce_list) return -EINVAL; /* Get this PF/VF func's MCE index */ idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); if (idx > (mce_idx + mce_list->max)) { dev_err(rvu->dev, "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", __func__, idx, mce_list->max, pcifunc >> RVU_PFVF_PF_SHIFT); return -EINVAL; } err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); if (err) return err; mcast = &nix_hw->mcast; mutex_lock(&mcast->mce_lock); err = nix_update_mce_list_entry(mce_list, pcifunc, add); if (err) goto end; /* Disable MCAM entry in NPC */ if (!mce_list->count) { npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false); goto end; } /* Dump the updated list to HW */ idx = mce_idx; last_idx = idx + mce_list->count - 1; hlist_for_each_entry(mce, &mce_list->head, node) { if (idx > last_idx) break; next_idx = idx + 1; /* EOL should be set in last MCE */ err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, mce->pcifunc, next_idx, (next_idx > last_idx) ? true : false); if (err) goto end; idx++; } end: mutex_unlock(&mcast->mce_lock); return err; } void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type, struct nix_mce_list **mce_list, int *mce_idx) { struct rvu_hwinfo *hw = rvu->hw; struct rvu_pfvf *pfvf; if (!hw->cap.nix_rx_multicast || !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) { *mce_list = NULL; *mce_idx = 0; return; } /* Get this PF/VF func's MCE index */ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); if (type == NIXLF_BCAST_ENTRY) { *mce_list = &pfvf->bcast_mce_list; *mce_idx = pfvf->bcast_mce_idx; } else if (type == NIXLF_ALLMULTI_ENTRY) { *mce_list = &pfvf->mcast_mce_list; *mce_idx = pfvf->mcast_mce_idx; } else if (type == NIXLF_PROMISC_ENTRY) { *mce_list = &pfvf->promisc_mce_list; *mce_idx = pfvf->promisc_mce_idx; } else { *mce_list = NULL; *mce_idx = 0; } } static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, int type, bool add) { int err = 0, nixlf, blkaddr, mcam_index, mce_idx; struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_hwinfo *hw = rvu->hw; struct nix_mce_list *mce_list; int pf; /* skip multicast pkt replication for AF's VFs & SDP links */ if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc)) return 0; if (!hw->cap.nix_rx_multicast) return 0; pf = rvu_get_pf(pcifunc); if (!is_pf_cgxmapped(rvu, pf)) return 0; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) return -EINVAL; nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); if (nixlf < 0) return -EINVAL; nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx); mcam_index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK, nixlf, type); err = nix_update_mce_list(rvu, pcifunc, mce_list, mce_idx, mcam_index, add); return err; } static int nix_setup_mce_tables(struct rvu *rvu, 
struct nix_hw *nix_hw) { struct nix_mcast *mcast = &nix_hw->mcast; int err, pf, numvfs, idx; struct rvu_pfvf *pfvf; u16 pcifunc; u64 cfg; /* Skip PF0 (i.e AF) */ for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) { cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); /* If PF is not enabled, nothing to do */ if (!((cfg >> 20) & 0x01)) continue; /* Get numVFs attached to this PF */ numvfs = (cfg >> 12) & 0xFF; pfvf = &rvu->pf[pf]; /* This NIX0/1 block mapped to PF ? */ if (pfvf->nix_blkaddr != nix_hw->blkaddr) continue; /* save start idx of broadcast mce list */ pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1); /* save start idx of multicast mce list */ pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1); /* save the start idx of promisc mce list */ pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1); for (idx = 0; idx < (numvfs + 1); idx++) { /* idx-0 is for PF, followed by VFs */ pcifunc = (pf << RVU_PFVF_PF_SHIFT); pcifunc |= idx; /* Add dummy entries now, so that we don't have to check * for whether AQ_OP should be INIT/WRITE later on. * Will be updated when a NIXLF is attached/detached to * these PF/VFs. */ err = nix_blk_setup_mce(rvu, nix_hw, pfvf->bcast_mce_idx + idx, NIX_AQ_INSTOP_INIT, pcifunc, 0, true); if (err) return err; /* add dummy entries to multicast mce list */ err = nix_blk_setup_mce(rvu, nix_hw, pfvf->mcast_mce_idx + idx, NIX_AQ_INSTOP_INIT, pcifunc, 0, true); if (err) return err; /* add dummy entries to promisc mce list */ err = nix_blk_setup_mce(rvu, nix_hw, pfvf->promisc_mce_idx + idx, NIX_AQ_INSTOP_INIT, pcifunc, 0, true); if (err) return err; } } return 0; } static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) { struct nix_mcast *mcast = &nix_hw->mcast; struct rvu_hwinfo *hw = rvu->hw; int err, size; size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F; size = (1ULL << size); /* Alloc memory for multicast/mirror replication entries */ err = qmem_alloc(rvu->dev, &mcast->mce_ctx, (256UL << MC_TBL_SIZE), size); if (err) return -ENOMEM; rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE, (u64)mcast->mce_ctx->iova); /* Set max list length equal to max no of VFs per PF + PF itself */ rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG, BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE); /* Alloc memory for multicast replication buffers */ size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF; err = qmem_alloc(rvu->dev, &mcast->mcast_buf, (8UL << MC_BUF_CNT), size); if (err) return -ENOMEM; rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE, (u64)mcast->mcast_buf->iova); /* Alloc pkind for NIX internal RX multicast/mirror replay */ mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc); rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG, BIT_ULL(63) | (mcast->replay_pkind << 24) | BIT_ULL(20) | MC_BUF_CNT); mutex_init(&mcast->mce_lock); return nix_setup_mce_tables(rvu, nix_hw); } static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw) { struct nix_txvlan *vlan = &nix_hw->txvlan; int err; /* Allocate resource bimap for tx vtag def registers*/ vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX; err = rvu_alloc_bitmap(&vlan->rsrc); if (err) return -ENOMEM; /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max, sizeof(u16), GFP_KERNEL); if (!vlan->entry2pfvf_map) goto 
free_mem; mutex_init(&vlan->rsrc_lock); return 0; free_mem: kfree(vlan->rsrc.bmap); return -ENOMEM; } static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) { struct nix_txsch *txsch; int err, lvl, schq; u64 cfg, reg; /* Get scheduler queue count of each type and alloc * bitmap for each for alloc/free/attach operations. */ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { txsch = &nix_hw->txsch[lvl]; txsch->lvl = lvl; switch (lvl) { case NIX_TXSCH_LVL_SMQ: reg = NIX_AF_MDQ_CONST; break; case NIX_TXSCH_LVL_TL4: reg = NIX_AF_TL4_CONST; break; case NIX_TXSCH_LVL_TL3: reg = NIX_AF_TL3_CONST; break; case NIX_TXSCH_LVL_TL2: reg = NIX_AF_TL2_CONST; break; case NIX_TXSCH_LVL_TL1: reg = NIX_AF_TL1_CONST; break; } cfg = rvu_read64(rvu, blkaddr, reg); txsch->schq.max = cfg & 0xFFFF; err = rvu_alloc_bitmap(&txsch->schq); if (err) return err; /* Allocate memory for scheduler queues to * PF/VF pcifunc mapping info. */ txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max, sizeof(u32), GFP_KERNEL); if (!txsch->pfvf_map) return -ENOMEM; for (schq = 0; schq < txsch->schq.max; schq++) txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); } /* Setup a default value of 8192 as DWRR MTU */ if (rvu->hw->cap.nix_common_dwrr_mtu || rvu->hw->cap.nix_multiple_dwrr_mtu) { rvu_write64(rvu, blkaddr, nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM), convert_bytes_to_dwrr_mtu(8192)); rvu_write64(rvu, blkaddr, nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK), convert_bytes_to_dwrr_mtu(8192)); rvu_write64(rvu, blkaddr, nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP), convert_bytes_to_dwrr_mtu(8192)); } return 0; } int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr, u32 cfg) { int fmt_idx; for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) { if (nix_hw->mark_format.cfg[fmt_idx] == cfg) return fmt_idx; } if (fmt_idx >= nix_hw->mark_format.total) return -ERANGE; rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg); nix_hw->mark_format.cfg[fmt_idx] = cfg; nix_hw->mark_format.in_use++; return fmt_idx; } static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) { u64 cfgs[] = { [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003, [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200, [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203, [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c, [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00, [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c, [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008, [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800, [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808, }; int i, rc; u64 total; total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8; nix_hw->mark_format.total = (u8)total; nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32), GFP_KERNEL); if (!nix_hw->mark_format.cfg) return -ENOMEM; for (i = 0; i < NIX_MARK_CFG_MAX; i++) { rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]); if (rc < 0) dev_err(rvu->dev, "Err %d in setup mark format %d\n", i, rc); } return 0; } static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu) { /* CN10K supports LBK FIFO size 72 KB */ if (rvu->hw->lbk_bufsize == 0x12000) *max_mtu = CN10K_LBK_LINK_MAX_FRS; else *max_mtu = NIC_HW_MAX_FRS; } static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu) { int fifo_size = rvu_cgx_get_fifolen(rvu); /* RPM supports FIFO len 128 KB and RPM2 supports double the * FIFO len to accommodate 8 LMACS */ if (fifo_size == 0x20000 || fifo_size == 0x40000) *max_mtu = CN10K_LMAC_LINK_MAX_FRS; else *max_mtu = 
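/* Any other FIFO length (e.g. OcteonTx2 CGX) falls back to the
 * generic limit.
 */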
NIC_HW_MAX_FRS; } int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, struct nix_hw_info *rsp) { u16 pcifunc = req->hdr.pcifunc; u64 dwrr_mtu; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) return NIX_AF_ERR_AF_LF_INVALID; if (is_afvf(pcifunc)) rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu); else rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu); rsp->min_mtu = NIC_HW_MIN_FRS; if (!rvu->hw->cap.nix_common_dwrr_mtu && !rvu->hw->cap.nix_multiple_dwrr_mtu) { /* Return '1' on OTx2 */ rsp->rpm_dwrr_mtu = 1; rsp->sdp_dwrr_mtu = 1; rsp->lbk_dwrr_mtu = 1; return 0; } /* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */ dwrr_mtu = rvu_read64(rvu, blkaddr, nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM)); rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); dwrr_mtu = rvu_read64(rvu, blkaddr, nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP)); rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); dwrr_mtu = rvu_read64(rvu, blkaddr, nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK)); rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); return 0; } int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; int i, nixlf, blkaddr, err; u64 stats; err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); if (err) return err; /* Get stats count supported by HW */ stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); /* Reset tx stats */ for (i = 0; i < ((stats >> 24) & 0xFF); i++) rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0); /* Reset rx stats */ for (i = 0; i < ((stats >> 32) & 0xFF); i++) rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0); return 0; } /* Returns the ALG index to be set into NPC_RX_ACTION */ static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg) { int i; /* Scan over exiting algo entries to find a match */ for (i = 0; i < nix_hw->flowkey.in_use; i++) if (nix_hw->flowkey.flowkey[i] == flow_cfg) return i; return -ERANGE; } static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) { int idx, nr_field, key_off, field_marker, keyoff_marker; int max_key_off, max_bit_pos, group_member; struct nix_rx_flowkey_alg *field; struct nix_rx_flowkey_alg tmp; u32 key_type, valid_key; u32 l3_l4_src_dst; int l4_key_offset = 0; if (!alg) return -EINVAL; #define FIELDS_PER_ALG 5 #define MAX_KEY_OFF 40 /* Clear all fields */ memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG); /* Each of the 32 possible flow key algorithm definitions should * fall into above incremental config (except ALG0). Otherwise a * single NPC MCAM entry is not sufficient for supporting RSS. * * If a different definition or combination needed then NPC MCAM * has to be programmed to filter such pkts and it's action should * point to this definition to calculate flowtag or hash. * * The `for loop` goes over _all_ protocol field and the following * variables depicts the state machine forward progress logic. * * keyoff_marker - Enabled when hash byte length needs to be accounted * in field->key_offset update. * field_marker - Enabled when a new field needs to be selected. * group_member - Enabled when protocol is part of a group. 
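 * Note: an algorithm may use at most FIELDS_PER_ALG (5) field
 * extractors and the extracted bytes must fit in the MAX_KEY_OFF
 * (40) byte hash key, otherwise NIX_AF_ERR_RSS_NOSPC_FIELD is
 * returned at the end of this function.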
*/ /* Last 4 bits (31:28) are reserved to specify SRC, DST * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST, * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST */ l3_l4_src_dst = flow_cfg; /* Reset these 4 bits, so that these won't be part of key */ flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK; keyoff_marker = 0; max_key_off = 0; group_member = 0; nr_field = 0; key_off = 0; field_marker = 1; field = &tmp; max_bit_pos = fls(flow_cfg); for (idx = 0; idx < max_bit_pos && nr_field < FIELDS_PER_ALG && key_off < MAX_KEY_OFF; idx++) { key_type = BIT(idx); valid_key = flow_cfg & key_type; /* Found a field marker, reset the field values */ if (field_marker) memset(&tmp, 0, sizeof(tmp)); field_marker = true; keyoff_marker = true; switch (key_type) { case NIX_FLOW_KEY_TYPE_PORT: field->sel_chan = true; /* This should be set to 1, when SEL_CHAN is set */ field->bytesm1 = 1; break; case NIX_FLOW_KEY_TYPE_IPV4_PROTO: field->lid = NPC_LID_LC; field->hdr_offset = 9; /* offset */ field->bytesm1 = 0; /* 1 byte */ field->ltype_match = NPC_LT_LC_IP; field->ltype_mask = 0xF; break; case NIX_FLOW_KEY_TYPE_IPV4: case NIX_FLOW_KEY_TYPE_INNR_IPV4: field->lid = NPC_LID_LC; field->ltype_match = NPC_LT_LC_IP; if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) { field->lid = NPC_LID_LG; field->ltype_match = NPC_LT_LG_TU_IP; } field->hdr_offset = 12; /* SIP offset */ field->bytesm1 = 7; /* SIP + DIP, 8 bytes */ /* Only SIP */ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY) field->bytesm1 = 3; /* SIP, 4 bytes */ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) { /* Both SIP + DIP */ if (field->bytesm1 == 3) { field->bytesm1 = 7; /* SIP + DIP, 8B */ } else { /* Only DIP */ field->hdr_offset = 16; /* DIP off */ field->bytesm1 = 3; /* DIP, 4 bytes */ } } field->ltype_mask = 0xF; /* Match only IPv4 */ keyoff_marker = false; break; case NIX_FLOW_KEY_TYPE_IPV6: case NIX_FLOW_KEY_TYPE_INNR_IPV6: field->lid = NPC_LID_LC; field->ltype_match = NPC_LT_LC_IP6; if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) { field->lid = NPC_LID_LG; field->ltype_match = NPC_LT_LG_TU_IP6; } field->hdr_offset = 8; /* SIP offset */ field->bytesm1 = 31; /* SIP + DIP, 32 bytes */ /* Only SIP */ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY) field->bytesm1 = 15; /* SIP, 16 bytes */ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) { /* Both SIP + DIP */ if (field->bytesm1 == 15) { /* SIP + DIP, 32 bytes */ field->bytesm1 = 31; } else { /* Only DIP */ field->hdr_offset = 24; /* DIP off */ field->bytesm1 = 15; /* DIP,16 bytes */ } } field->ltype_mask = 0xF; /* Match only IPv6 */ break; case NIX_FLOW_KEY_TYPE_TCP: case NIX_FLOW_KEY_TYPE_UDP: case NIX_FLOW_KEY_TYPE_SCTP: case NIX_FLOW_KEY_TYPE_INNR_TCP: case NIX_FLOW_KEY_TYPE_INNR_UDP: case NIX_FLOW_KEY_TYPE_INNR_SCTP: field->lid = NPC_LID_LD; if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP || key_type == NIX_FLOW_KEY_TYPE_INNR_UDP || key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) field->lid = NPC_LID_LH; field->bytesm1 = 3; /* Sport + Dport, 4 bytes */ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY) field->bytesm1 = 1; /* SRC, 2 bytes */ if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) { /* Both SRC + DST */ if (field->bytesm1 == 1) { /* SRC + DST, 4 bytes */ field->bytesm1 = 3; } else { /* Only DIP */ field->hdr_offset = 2; /* DST off */ field->bytesm1 = 1; /* DST, 2 bytes */ } } /* Enum values for NPC_LID_LD and NPC_LID_LG are same, * so no need to change the ltype_match, just change * the lid for inner protocols */ BUILD_BUG_ON((int)NPC_LT_LD_TCP != 
(int)NPC_LT_LH_TU_TCP); BUILD_BUG_ON((int)NPC_LT_LD_UDP != (int)NPC_LT_LH_TU_UDP); BUILD_BUG_ON((int)NPC_LT_LD_SCTP != (int)NPC_LT_LH_TU_SCTP); if ((key_type == NIX_FLOW_KEY_TYPE_TCP || key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) && valid_key) { field->ltype_match |= NPC_LT_LD_TCP; group_member = true; } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP || key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) && valid_key) { field->ltype_match |= NPC_LT_LD_UDP; group_member = true; } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP || key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) && valid_key) { field->ltype_match |= NPC_LT_LD_SCTP; group_member = true; } field->ltype_mask = ~field->ltype_match; if (key_type == NIX_FLOW_KEY_TYPE_SCTP || key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) { /* Handle the case where any of the group item * is enabled in the group but not the final one */ if (group_member) { valid_key = true; group_member = false; } } else { field_marker = false; keyoff_marker = false; } /* TCP/UDP/SCTP and ESP/AH falls at same offset so * remember the TCP key offset of 40 byte hash key. */ if (key_type == NIX_FLOW_KEY_TYPE_TCP) l4_key_offset = key_off; break; case NIX_FLOW_KEY_TYPE_NVGRE: field->lid = NPC_LID_LD; field->hdr_offset = 4; /* VSID offset */ field->bytesm1 = 2; field->ltype_match = NPC_LT_LD_NVGRE; field->ltype_mask = 0xF; break; case NIX_FLOW_KEY_TYPE_VXLAN: case NIX_FLOW_KEY_TYPE_GENEVE: field->lid = NPC_LID_LE; field->bytesm1 = 2; field->hdr_offset = 4; field->ltype_mask = 0xF; field_marker = false; keyoff_marker = false; if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) { field->ltype_match |= NPC_LT_LE_VXLAN; group_member = true; } if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) { field->ltype_match |= NPC_LT_LE_GENEVE; group_member = true; } if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) { if (group_member) { field->ltype_mask = ~field->ltype_match; field_marker = true; keyoff_marker = true; valid_key = true; group_member = false; } } break; case NIX_FLOW_KEY_TYPE_ETH_DMAC: case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC: field->lid = NPC_LID_LA; field->ltype_match = NPC_LT_LA_ETHER; if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) { field->lid = NPC_LID_LF; field->ltype_match = NPC_LT_LF_TU_ETHER; } field->hdr_offset = 0; field->bytesm1 = 5; /* DMAC 6 Byte */ field->ltype_mask = 0xF; break; case NIX_FLOW_KEY_TYPE_IPV6_EXT: field->lid = NPC_LID_LC; field->hdr_offset = 40; /* IPV6 hdr */ field->bytesm1 = 0; /* 1 Byte ext hdr*/ field->ltype_match = NPC_LT_LC_IP6_EXT; field->ltype_mask = 0xF; break; case NIX_FLOW_KEY_TYPE_GTPU: field->lid = NPC_LID_LE; field->hdr_offset = 4; field->bytesm1 = 3; /* 4 bytes TID*/ field->ltype_match = NPC_LT_LE_GTPU; field->ltype_mask = 0xF; break; case NIX_FLOW_KEY_TYPE_VLAN: field->lid = NPC_LID_LB; field->hdr_offset = 2; /* Skip TPID (2-bytes) */ field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */ field->ltype_match = NPC_LT_LB_CTAG; field->ltype_mask = 0xF; field->fn_mask = 1; /* Mask out the first nibble */ break; case NIX_FLOW_KEY_TYPE_AH: case NIX_FLOW_KEY_TYPE_ESP: field->hdr_offset = 0; field->bytesm1 = 7; /* SPI + sequence number */ field->ltype_mask = 0xF; field->lid = NPC_LID_LE; field->ltype_match = NPC_LT_LE_ESP; if (key_type == NIX_FLOW_KEY_TYPE_AH) { field->lid = NPC_LID_LD; field->ltype_match = NPC_LT_LD_AH; field->hdr_offset = 4; keyoff_marker = false; } break; } field->ena = 1; /* Found a valid flow key type */ if (valid_key) { /* Use the key offset of TCP/UDP/SCTP fields * for ESP/AH fields. 
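 * (l4_key_offset was recorded above when the TCP field was parsed,
 * so ESP/AH hash bytes share that slot instead of consuming extra
 * key space.)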
*/ if (key_type == NIX_FLOW_KEY_TYPE_ESP || key_type == NIX_FLOW_KEY_TYPE_AH) key_off = l4_key_offset; field->key_offset = key_off; memcpy(&alg[nr_field], field, sizeof(*field)); max_key_off = max(max_key_off, field->bytesm1 + 1); /* Found a field marker, get the next field */ if (field_marker) nr_field++; } /* Found a keyoff marker, update the new key_off */ if (keyoff_marker) { key_off += max_key_off; max_key_off = 0; } } /* Processed all the flow key types */ if (idx == max_bit_pos && key_off <= MAX_KEY_OFF) return 0; else return NIX_AF_ERR_RSS_NOSPC_FIELD; } static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg) { u64 field[FIELDS_PER_ALG]; struct nix_hw *hw; int fid, rc; hw = get_nix_hw(rvu->hw, blkaddr); if (!hw) return NIX_AF_ERR_INVALID_NIXBLK; /* No room to add new flow hash algoritham */ if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX) return NIX_AF_ERR_RSS_NOSPC_ALGO; /* Generate algo fields for the given flow_cfg */ rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg); if (rc) return rc; /* Update ALGX_FIELDX register with generated fields */ for (fid = 0; fid < FIELDS_PER_ALG; fid++) rvu_write64(rvu, blkaddr, NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use, fid), field[fid]); /* Store the flow_cfg for futher lookup */ rc = hw->flowkey.in_use; hw->flowkey.flowkey[rc] = flow_cfg; hw->flowkey.in_use++; return rc; } int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu, struct nix_rss_flowkey_cfg *req, struct nix_rss_flowkey_cfg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; int alg_idx, nixlf, blkaddr; struct nix_hw *nix_hw; int err; err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); if (err) return err; nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg); /* Failed to get algo index from the exiting list, reserve new */ if (alg_idx < 0) { alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr, req->flowkey_cfg); if (alg_idx < 0) return alg_idx; } rsp->alg_idx = alg_idx; rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group, alg_idx, req->mcam_index); return 0; } static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr) { u32 flowkey_cfg, minkey_cfg; int alg, fid, rc; /* Disable all flow key algx fieldx */ for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) { for (fid = 0; fid < FIELDS_PER_ALG; fid++) rvu_write64(rvu, blkaddr, NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid), 0); } /* IPv4/IPv6 SIP/DIPs */ flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6; rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); if (rc < 0) return rc; /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ minkey_cfg = flowkey_cfg; flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP; rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); if (rc < 0) return rc; /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP; rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); if (rc < 0) return rc; /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP; rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); if (rc < 0) return rc; /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP; rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); if (rc < 0) return rc; /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_SCTP; rc = reserve_flowkey_alg_idx(rvu, 
blkaddr, flowkey_cfg); if (rc < 0) return rc; /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP; rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); if (rc < 0) return rc; /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP; rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); if (rc < 0) return rc; return 0; } int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, struct nix_set_mac_addr *req, struct msg_rsp *rsp) { bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK; u16 pcifunc = req->hdr.pcifunc; int blkaddr, nixlf, err; struct rvu_pfvf *pfvf; err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); if (err) return err; pfvf = rvu_get_pfvf(rvu, pcifunc); /* untrusted VF can't overwrite admin(PF) changes */ if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) { dev_warn(rvu->dev, "MAC address set by admin(PF) cannot be overwritten by untrusted VF"); return -EPERM; } ether_addr_copy(pfvf->mac_addr, req->mac_addr); rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base, req->mac_addr); if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf) ether_addr_copy(pfvf->default_mac, req->mac_addr); rvu_switch_update_rules(rvu, pcifunc); return 0; } int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu, struct msg_req *req, struct nix_get_mac_addr_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; if (!is_nixlf_attached(rvu, pcifunc)) return NIX_AF_ERR_AF_LF_INVALID; pfvf = rvu_get_pfvf(rvu, pcifunc); ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); return 0; } int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, struct msg_rsp *rsp) { bool allmulti, promisc, nix_rx_multicast; u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; int nixlf, err; pfvf = rvu_get_pfvf(rvu, pcifunc); promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false; allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false; pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? 
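/* use_mce_list selects packet replication through MCE lists; when it
 * is off, or HW lacks nix_rx_multicast, promisc/allmulti are handled
 * purely with NPC MCAM entries further below.
 */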
true : false; nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list; if (is_vf(pcifunc) && !nix_rx_multicast && (promisc || allmulti)) { dev_warn_ratelimited(rvu->dev, "VF promisc/multicast not supported\n"); return 0; } /* untrusted VF can't configure promisc/allmulti */ if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && (promisc || allmulti)) return 0; err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); if (err) return err; if (nix_rx_multicast) { /* add/del this PF_FUNC to/from mcast pkt replication list */ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY, allmulti); if (err) { dev_err(rvu->dev, "Failed to update pcifunc 0x%x to multicast list\n", pcifunc); return err; } /* add/del this PF_FUNC to/from promisc pkt replication list */ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY, promisc); if (err) { dev_err(rvu->dev, "Failed to update pcifunc 0x%x to promisc list\n", pcifunc); return err; } } /* install/uninstall allmulti entry */ if (allmulti) { rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base); } else { if (!nix_rx_multicast) rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false); } /* install/uninstall promisc entry */ if (promisc) rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base, pfvf->rx_chan_cnt); else if (!nix_rx_multicast) rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false); return 0; } static void nix_find_link_frs(struct rvu *rvu, struct nix_frs_cfg *req, u16 pcifunc) { int pf = rvu_get_pf(pcifunc); struct rvu_pfvf *pfvf; int maxlen, minlen; int numvfs, hwvf; int vf; /* Update with requester's min/max lengths */ pfvf = rvu_get_pfvf(rvu, pcifunc); pfvf->maxlen = req->maxlen; if (req->update_minlen) pfvf->minlen = req->minlen; maxlen = req->maxlen; minlen = req->update_minlen ? 
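/* The RX link config is shared by the PF and its VFs, so the
 * requested lengths are widened below to the max/min across all of
 * them before being programmed.
 */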
req->minlen : 0; /* Get this PF's numVFs and starting hwvf */ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); /* For each VF, compare requested max/minlen */ for (vf = 0; vf < numvfs; vf++) { pfvf = &rvu->hwvf[hwvf + vf]; if (pfvf->maxlen > maxlen) maxlen = pfvf->maxlen; if (req->update_minlen && pfvf->minlen && pfvf->minlen < minlen) minlen = pfvf->minlen; } /* Compare requested max/minlen with PF's max/minlen */ pfvf = &rvu->pf[pf]; if (pfvf->maxlen > maxlen) maxlen = pfvf->maxlen; if (req->update_minlen && pfvf->minlen && pfvf->minlen < minlen) minlen = pfvf->minlen; /* Update the request with max/min PF's and it's VF's max/min */ req->maxlen = maxlen; if (req->update_minlen) req->minlen = minlen; } static int nix_config_link_credits(struct rvu *rvu, int blkaddr, int link, u16 pcifunc, u64 tx_credits) { struct rvu_hwinfo *hw = rvu->hw; int pf = rvu_get_pf(pcifunc); u8 cgx_id = 0, lmac_id = 0; unsigned long poll_tmo; bool restore_tx_en = 0; struct nix_hw *nix_hw; u64 cfg, sw_xoff = 0; u32 schq = 0; u32 credits; int rc; nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; if (tx_credits == nix_hw->tx_credits[link]) return 0; /* Enable cgx tx if disabled for credits to be back */ if (is_pf_cgxmapped(rvu, pf)) { rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, true); } mutex_lock(&rvu->rsrc_lock); /* Disable new traffic to link */ if (hw->cap.nix_shaping) { schq = nix_get_tx_link(rvu, pcifunc); sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)); rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0)); } rc = NIX_AF_ERR_LINK_CREDITS; poll_tmo = jiffies + usecs_to_jiffies(200000); /* Wait for credits to return */ do { if (time_after(jiffies, poll_tmo)) goto exit; usleep_range(100, 200); cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link)); credits = (cfg >> 12) & 0xFFFFFULL; } while (credits != nix_hw->tx_credits[link]); cfg &= ~(0xFFFFFULL << 12); cfg |= (tx_credits << 12); rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); rc = 0; nix_hw->tx_credits[link] = tx_credits; exit: /* Enable traffic back */ if (hw->cap.nix_shaping && !sw_xoff) rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0); /* Restore state of cgx tx */ if (restore_tx_en) rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); mutex_unlock(&rvu->rsrc_lock); return rc; } int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, struct msg_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; int pf = rvu_get_pf(pcifunc); int blkaddr, schq, link = -1; struct nix_txsch *txsch; u64 cfg, lmac_fifo_len; struct nix_hw *nix_hw; struct rvu_pfvf *pfvf; u8 cgx = 0, lmac = 0; u16 max_mtu; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) return NIX_AF_ERR_AF_LF_INVALID; nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; if (is_afvf(pcifunc)) rvu_get_lbk_link_max_frs(rvu, &max_mtu); else rvu_get_lmac_link_max_frs(rvu, &max_mtu); if (!req->sdp_link && req->maxlen > max_mtu) return NIX_AF_ERR_FRS_INVALID; if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) return NIX_AF_ERR_FRS_INVALID; /* Check if requester wants to update SMQ's */ if (!req->update_smq) goto rx_frscfg; /* Update min/maxlen in each of the SMQ attached to this PF/VF */ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; mutex_lock(&rvu->rsrc_lock); for (schq = 0; schq < txsch->schq.max; schq++) { if 
(TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) continue; cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8); if (req->update_minlen) cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F); rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); } mutex_unlock(&rvu->rsrc_lock); rx_frscfg: /* Check if config is for SDP link */ if (req->sdp_link) { if (!hw->sdp_links) return NIX_AF_ERR_RX_LINK_INVALID; link = hw->cgx_links + hw->lbk_links; goto linkcfg; } /* Check if the request is from CGX mapped RVU PF */ if (is_pf_cgxmapped(rvu, pf)) { /* Get CGX and LMAC to which this PF is mapped and find link */ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac); link = (cgx * hw->lmac_per_cgx) + lmac; } else if (pf == 0) { /* For VFs of PF0 ingress is LBK port, so config LBK link */ pfvf = rvu_get_pfvf(rvu, pcifunc); link = hw->cgx_links + pfvf->lbkid; } if (link < 0) return NIX_AF_ERR_RX_LINK_INVALID; linkcfg: nix_find_link_frs(rvu, req, pcifunc); cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); if (req->update_minlen) cfg = (cfg & ~0xFFFFULL) | req->minlen; rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); if (req->sdp_link || pf == 0) return 0; /* Update transmit credits for CGX links */ lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac); if (!lmac_fifo_len) { dev_err(rvu->dev, "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n", __func__, cgx, lmac); return 0; } return nix_config_link_credits(rvu, blkaddr, link, pcifunc, (lmac_fifo_len - req->maxlen) / 16); } int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, struct msg_rsp *rsp) { int nixlf, blkaddr, err; u64 cfg; err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr); if (err) return err; cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf)); /* Set the interface configuration */ if (req->len_verify & BIT(0)) cfg |= BIT_ULL(41); else cfg &= ~BIT_ULL(41); if (req->len_verify & BIT(1)) cfg |= BIT_ULL(40); else cfg &= ~BIT_ULL(40); if (req->len_verify & NIX_RX_DROP_RE) cfg |= BIT_ULL(32); else cfg &= ~BIT_ULL(32); if (req->csum_verify & BIT(0)) cfg |= BIT_ULL(37); else cfg &= ~BIT_ULL(37); rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg); return 0; } static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs) { return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */ } static void nix_link_config(struct rvu *rvu, int blkaddr, struct nix_hw *nix_hw) { struct rvu_hwinfo *hw = rvu->hw; int cgx, lmac_cnt, slink, link; u16 lbk_max_frs, lmac_max_frs; unsigned long lmac_bmap; u64 tx_credits, cfg; u64 lmac_fifo_len; int iter; rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs); rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs); /* Set default min/max packet lengths allowed on NIX Rx links. * * With HW reset minlen value of 60byte, HW will treat ARP pkts * as undersize and report them to SW as error pkts, hence * setting it to 40 bytes. */ for (link = 0; link < hw->cgx_links; link++) { rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS); } for (link = hw->cgx_links; link < hw->lbk_links; link++) { rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS); } if (hw->sdp_links) { link = hw->cgx_links + hw->lbk_links; rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); } /* Set credits for Tx links assuming max packet length allowed. 
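 * Credits appear to be in 16-byte units, i.e.
 * tx_credits = (LMAC FIFO size - max frame size) / 16, as computed
 * below.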
* This will be reconfigured based on MTU set for PF/VF. */ for (cgx = 0; cgx < hw->cgx; cgx++) { lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); /* Skip when cgx is not available or lmac cnt is zero */ if (lmac_cnt <= 0) continue; slink = cgx * hw->lmac_per_cgx; /* Get LMAC id's from bitmap */ lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu)); for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) { lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter); if (!lmac_fifo_len) { dev_err(rvu->dev, "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n", __func__, cgx, iter); continue; } tx_credits = (lmac_fifo_len - lmac_max_frs) / 16; /* Enable credits and set credit pkt count to max allowed */ cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); link = iter + slink; nix_hw->tx_credits[link] = tx_credits; rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); } } /* Set Tx credits for LBK link */ slink = hw->cgx_links; for (link = slink; link < (slink + hw->lbk_links); link++) { tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs); nix_hw->tx_credits[link] = tx_credits; /* Enable credits and set credit pkt count to max allowed */ tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); } } static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr) { int idx, err; u64 status; /* Start X2P bus calibration */ rvu_write64(rvu, blkaddr, NIX_AF_CFG, rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9)); /* Wait for calibration to complete */ err = rvu_poll_reg(rvu, blkaddr, NIX_AF_STATUS, BIT_ULL(10), false); if (err) { dev_err(rvu->dev, "NIX X2P bus calibration failed\n"); return err; } status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS); /* Check if CGX devices are ready */ for (idx = 0; idx < rvu->cgx_cnt_max; idx++) { /* Skip when cgx port is not available */ if (!rvu_cgx_pdata(idx, rvu) || (status & (BIT_ULL(16 + idx)))) continue; dev_err(rvu->dev, "CGX%d didn't respond to NIX X2P calibration\n", idx); err = -EBUSY; } /* Check if LBK is ready */ if (!(status & BIT_ULL(19))) { dev_err(rvu->dev, "LBK didn't respond to NIX X2P calibration\n"); err = -EBUSY; } /* Clear 'calibrate_x2p' bit */ rvu_write64(rvu, blkaddr, NIX_AF_CFG, rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9)); if (err || (status & 0x3FFULL)) dev_err(rvu->dev, "NIX X2P calibration failed, status 0x%llx\n", status); if (err) return err; return 0; } static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) { u64 cfg; int err; /* Set admin queue endianness */ cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG); #ifdef __BIG_ENDIAN cfg |= BIT_ULL(8); rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); #else cfg &= ~BIT_ULL(8); rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); #endif /* Do not bypass NDC cache */ cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG); cfg &= ~0x3FFEULL; #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING /* Disable caching of SQB aka SQEs */ cfg |= 0x04ULL; #endif rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg); /* Result structure can be followed by RQ/SQ/CQ context at * RES + 128bytes and a write mask at RES + 256 bytes, depending on * operation type. Alloc sufficient result memory for all operations. 
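 * Hence each result entry below is sized
 * ALIGN(sizeof(struct nix_aq_res_s), 128) + 256 bytes.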
*/ err = rvu_aq_alloc(rvu, &block->aq, Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s), ALIGN(sizeof(struct nix_aq_res_s), 128) + 256); if (err) return err; rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE); rvu_write64(rvu, block->addr, NIX_AF_AQ_BASE, (u64)block->aq->inst->iova); return 0; } static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr) { struct rvu_hwinfo *hw = rvu->hw; u64 hw_const; hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); /* On OcteonTx2 DWRR quantum is directly configured into each of * the transmit scheduler queues. And PF/VF drivers were free to * config any value upto 2^24. * On CN10K, HW is modified, the quantum configuration at scheduler * queues is in terms of weight. And SW needs to setup a base DWRR MTU * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do * 'DWRR MTU * weight' to get the quantum. * * Check if HW uses a common MTU for all DWRR quantum configs. * On OcteonTx2 this register field is '0'. */ if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61))) hw->cap.nix_common_dwrr_mtu = true; if (hw_const & BIT_ULL(61)) hw->cap.nix_multiple_dwrr_mtu = true; } static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) { const struct npc_lt_def_cfg *ltdefs; struct rvu_hwinfo *hw = rvu->hw; int blkaddr = nix_hw->blkaddr; struct rvu_block *block; int err; u64 cfg; block = &hw->block[blkaddr]; if (is_rvu_96xx_B0(rvu)) { /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt * internal state when conditional clocks are turned off. * Hence enable them. */ rvu_write64(rvu, blkaddr, NIX_AF_CFG, rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL); /* Set chan/link to backpressure TL3 instead of TL2 */ rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01); /* Disable SQ manager's sticky mode operation (set TM6 = 0) * This sticky mode is known to cause SQ stalls when multiple * SQs are mapped to same SMQ and transmitting pkts at a time. */ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); cfg &= ~BIT_ULL(15); rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg); } ltdefs = rvu->kpu.lt_def; /* Calibrate X2P bus to check if CGX/LBK links are fine */ err = nix_calibrate_x2p(rvu, blkaddr); if (err) return err; /* Setup capabilities of the NIX block */ rvu_nix_setup_capabilities(rvu, blkaddr); /* Initialize admin queue */ err = nix_aq_init(rvu, block); if (err) return err; /* Restore CINT timer delay to HW reset values */ rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL); cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG); /* For better performance use NDC TX instead of NDC RX for SQ's SQEs" */ cfg |= 1ULL; if (!is_rvu_otx2(rvu)) cfg |= NIX_PTP_1STEP_EN; rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg); if (!is_rvu_otx2(rvu)) rvu_nix_block_cn10k_init(rvu, nix_hw); if (is_block_implemented(hw, blkaddr)) { err = nix_setup_txschq(rvu, nix_hw, blkaddr); if (err) return err; err = nix_setup_ipolicers(rvu, nix_hw, blkaddr); if (err) return err; err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr); if (err) return err; err = nix_setup_mcast(rvu, nix_hw, blkaddr); if (err) return err; err = nix_setup_txvlan(rvu, nix_hw); if (err) return err; /* Configure segmentation offload formats */ nix_setup_lso(rvu, nix_hw, blkaddr); /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info. * This helps HW protocol checker to identify headers * and validate length and checksums. 
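 * Each NIX_AF_RX_DEF_* register written below packs
 * (lid << 8) | (ltype_match << 4) | ltype_mask from the KPU lt_def
 * profile.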
*/ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2, (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) | ltdefs->rx_ol2.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4, (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) | ltdefs->rx_oip4.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4, (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) | ltdefs->rx_iip4.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6, (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) | ltdefs->rx_oip6.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6, (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) | ltdefs->rx_iip6.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP, (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) | ltdefs->rx_otcp.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP, (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) | ltdefs->rx_itcp.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP, (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) | ltdefs->rx_oudp.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP, (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) | ltdefs->rx_iudp.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP, (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) | ltdefs->rx_osctp.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP, (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) | ltdefs->rx_isctp.ltype_mask); if (!is_rvu_otx2(rvu)) { /* Enable APAD calculation for other protocols * matching APAD0 and APAD1 lt def registers. */ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0, (ltdefs->rx_apad0.valid << 11) | (ltdefs->rx_apad0.lid << 8) | (ltdefs->rx_apad0.ltype_match << 4) | ltdefs->rx_apad0.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1, (ltdefs->rx_apad1.valid << 11) | (ltdefs->rx_apad1.lid << 8) | (ltdefs->rx_apad1.ltype_match << 4) | ltdefs->rx_apad1.ltype_mask); /* Receive ethertype defination register defines layer * information in NPC_RESULT_S to identify the Ethertype * location in L2 header. Used for Ethertype overwriting * in inline IPsec flow. 
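 * The ET registers additionally carry a byte offset and a valid
 * bit: (offset << 12) | (valid << 11) | (lid << 8) |
 * (ltype_match << 4) | ltype_mask.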
*/ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0), (ltdefs->rx_et[0].offset << 12) | (ltdefs->rx_et[0].valid << 11) | (ltdefs->rx_et[0].lid << 8) | (ltdefs->rx_et[0].ltype_match << 4) | ltdefs->rx_et[0].ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1), (ltdefs->rx_et[1].offset << 12) | (ltdefs->rx_et[1].valid << 11) | (ltdefs->rx_et[1].lid << 8) | (ltdefs->rx_et[1].ltype_match << 4) | ltdefs->rx_et[1].ltype_mask); } err = nix_rx_flowkey_alg_cfg(rvu, blkaddr); if (err) return err; nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links, sizeof(u64), GFP_KERNEL); if (!nix_hw->tx_credits) return -ENOMEM; /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ nix_link_config(rvu, blkaddr, nix_hw); /* Enable Channel backpressure */ rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0)); } return 0; } int rvu_nix_init(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; struct nix_hw *nix_hw; int blkaddr = 0, err; int i = 0; hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw), GFP_KERNEL); if (!hw->nix) return -ENOMEM; blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); while (blkaddr) { nix_hw = &hw->nix[i]; nix_hw->rvu = rvu; nix_hw->blkaddr = blkaddr; err = rvu_nix_block_init(rvu, nix_hw); if (err) return err; blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); i++; } return 0; } static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr, struct rvu_block *block) { struct nix_txsch *txsch; struct nix_mcast *mcast; struct nix_txvlan *vlan; struct nix_hw *nix_hw; int lvl; rvu_aq_free(rvu, block->aq); if (is_block_implemented(rvu->hw, blkaddr)) { nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) return; for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { txsch = &nix_hw->txsch[lvl]; kfree(txsch->schq.bmap); } kfree(nix_hw->tx_credits); nix_ipolicer_freemem(rvu, nix_hw); vlan = &nix_hw->txvlan; kfree(vlan->rsrc.bmap); mutex_destroy(&vlan->rsrc_lock); mcast = &nix_hw->mcast; qmem_free(rvu->dev, mcast->mce_ctx); qmem_free(rvu->dev, mcast->mcast_buf); mutex_destroy(&mcast->mce_lock); } } void rvu_nix_freemem(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; struct rvu_block *block; int blkaddr = 0; blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); while (blkaddr) { block = &hw->block[blkaddr]; rvu_nix_block_freemem(rvu, blkaddr, block); blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); } } int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; int nixlf, err; err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); if (err) return err; rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); npc_mcam_enable_flows(rvu, pcifunc); pfvf = rvu_get_pfvf(rvu, pcifunc); set_bit(NIXLF_INITIALIZED, &pfvf->flags); rvu_switch_update_rules(rvu, pcifunc); return rvu_cgx_start_stop_io(rvu, pcifunc, true); } int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; int nixlf, err; err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); if (err) return err; rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); pfvf = rvu_get_pfvf(rvu, pcifunc); clear_bit(NIXLF_INITIALIZED, &pfvf->flags); return rvu_cgx_start_stop_io(rvu, pcifunc, false); } #define RX_SA_BASE GENMASK_ULL(52, 7) void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct hwctx_disable_req ctx_req; int pf = rvu_get_pf(pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; 
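/* Teardown below: disable NPC MCAM entries and free TX schedulers,
 * then disable SQ/RQ/CQ contexts, undo PTP and flow-control config,
 * and finally free LF contexts and bandwidth profiles.
 */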
u64 sa_base; void *cgxd; int err; ctx_req.hdr.pcifunc = pcifunc; /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); nix_interface_deinit(rvu, pcifunc, nixlf); nix_rx_sync(rvu, blkaddr); nix_txschq_free(rvu, pcifunc); clear_bit(NIXLF_INITIALIZED, &pfvf->flags); rvu_cgx_start_stop_io(rvu, pcifunc, false); if (pfvf->sq_ctx) { ctx_req.ctype = NIX_AQ_CTYPE_SQ; err = nix_lf_hwctx_disable(rvu, &ctx_req); if (err) dev_err(rvu->dev, "SQ ctx disable failed\n"); } if (pfvf->rq_ctx) { ctx_req.ctype = NIX_AQ_CTYPE_RQ; err = nix_lf_hwctx_disable(rvu, &ctx_req); if (err) dev_err(rvu->dev, "RQ ctx disable failed\n"); } if (pfvf->cq_ctx) { ctx_req.ctype = NIX_AQ_CTYPE_CQ; err = nix_lf_hwctx_disable(rvu, &ctx_req); if (err) dev_err(rvu->dev, "CQ ctx disable failed\n"); } /* reset HW config done for Switch headers */ rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT, (PKIND_TX | PKIND_RX), 0, 0, 0, 0); /* Disabling CGX and NPC config done for PTP */ if (pfvf->hw_rx_tstamp_en) { rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); cgxd = rvu_cgx_pdata(cgx_id, rvu); mac_ops = get_mac_ops(cgxd); mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false); /* Undo NPC config done for PTP */ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false)) dev_err(rvu->dev, "NPC config for PTP failed\n"); pfvf->hw_rx_tstamp_en = false; } /* reset priority flow control config */ rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0); /* reset 802.3x flow control config */ rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0); nix_ctx_free(rvu, pfvf); nix_free_all_bandprof(rvu, pcifunc); sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf)); if (FIELD_GET(RX_SA_BASE, sa_base)) { err = rvu_cpt_ctx_flush(rvu, pcifunc); if (err) dev_err(rvu->dev, "CPT ctx flush failed with error: %d\n", err); } } #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32) static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) { struct rvu_hwinfo *hw = rvu->hw; struct rvu_block *block; int blkaddr, pf; int nixlf; u64 cfg; pf = rvu_get_pf(pcifunc); if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) return 0; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) return NIX_AF_ERR_AF_LF_INVALID; block = &hw->block[blkaddr]; nixlf = rvu_get_lf(rvu, block, pcifunc, 0); if (nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf)); if (enable) cfg |= NIX_AF_LFX_TX_CFG_PTP_EN; else cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN; rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); return 0; } int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true); } int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false); } int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, struct nix_lso_format_cfg *req, struct nix_lso_format_cfg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; struct nix_hw *nix_hw; struct rvu_pfvf *pfvf; int blkaddr, idx, f; u64 reg; pfvf = rvu_get_pfvf(rvu, pcifunc); blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (!pfvf->nixlf || blkaddr < 0) return NIX_AF_ERR_AF_LF_INVALID; nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; /* Find existing matching LSO format, if any */ for (idx = 0; idx < 
nix_hw->lso.in_use; idx++) { for (f = 0; f < NIX_LSO_FIELD_MAX; f++) { reg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_FORMATX_FIELDX(idx, f)); if (req->fields[f] != (reg & req->field_mask)) break; } if (f == NIX_LSO_FIELD_MAX) break; } if (idx < nix_hw->lso.in_use) { /* Match found */ rsp->lso_format_idx = idx; return 0; } if (nix_hw->lso.in_use == nix_hw->lso.total) return NIX_AF_ERR_LSO_CFG_FAIL; rsp->lso_format_idx = nix_hw->lso.in_use++; for (f = 0; f < NIX_LSO_FIELD_MAX; f++) rvu_write64(rvu, blkaddr, NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f), req->fields[f]); return 0; } #define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48) #define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32) #define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16) #define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0) #define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24) #define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8) #define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0) #define CPT_INST_CREDIT_TH GENMASK_ULL(53, 32) #define CPT_INST_CREDIT_BPID GENMASK_ULL(30, 22) #define CPT_INST_CREDIT_CNT GENMASK_ULL(21, 0) static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req, int blkaddr) { u8 cpt_idx, cpt_blkaddr; u64 val; cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1; if (req->enable) { val = 0; /* Enable context prefetching */ if (!is_rvu_otx2(rvu)) val |= BIT_ULL(51); /* Set OPCODE and EGRP */ val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp); val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode); val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1); val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2); rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val); /* Set CPT queue for inline IPSec */ val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot); val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC, req->inst_qsel.cpt_pf_func); if (!is_rvu_otx2(rvu)) { cpt_blkaddr = (cpt_idx == 0) ? 
BLKADDR_CPT0 : BLKADDR_CPT1; val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr); } rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), val); /* Set CPT credit */ val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx)); if ((val & 0x3FFFFF) != 0x3FFFFF) rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), 0x3FFFFF - val); val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit); val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid); val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th); rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val); } else { rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0); rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), 0x0); val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx)); if ((val & 0x3FFFFF) != 0x3FFFFF) rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), 0x3FFFFF - val); } } int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req, struct msg_rsp *rsp) { if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) return 0; nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0); if (is_block_implemented(rvu->hw, BLKADDR_CPT1)) nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1); return 0; } int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu, struct msg_req *req, struct nix_inline_ipsec_cfg *rsp) { u64 val; if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) return 0; val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG); rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val); rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val); rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val); rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val); val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0)); rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val); rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val); rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val); return 0; } int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu, struct nix_inline_ipsec_lf_cfg *req, struct msg_rsp *rsp) { int lf, blkaddr, err; u64 val; if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) return 0; err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr); if (err) return err; if (req->enable) { /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */ val = (u64)req->ipsec_cfg0.tt << 44 | (u64)req->ipsec_cfg0.tag_const << 20 | (u64)req->ipsec_cfg0.sa_pow2_size << 16 | req->ipsec_cfg0.lenm1_max; if (blkaddr == BLKADDR_NIX1) val |= BIT_ULL(46); rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val); /* Set SA_IDX_W and SA_IDX_MAX */ val = (u64)req->ipsec_cfg1.sa_idx_w << 32 | req->ipsec_cfg1.sa_idx_max; rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val); /* Set SA base address */ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), req->sa_base_addr); } else { rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0); rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0); rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), 0x0); } return 0; } void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc) { bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); /* overwrite vf mac address with default_mac */ if (from_vf) ether_addr_copy(pfvf->mac_addr, pfvf->default_mac); } /* NIX ingress policers or bandwidth profiles APIs */ static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr) { struct npc_lt_def_cfg defs, *ltdefs; ltdefs = &defs; memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg)); /* Extract PCP and DEI fields from outer 
VLAN from byte offset * 2 from the start of LB_PTR (ie TAG). * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN * fields are considered when 'Tunnel enable' is set in profile. */ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI, (2UL << 12) | (ltdefs->ovlan.lid << 8) | (ltdefs->ovlan.ltype_match << 4) | ltdefs->ovlan.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI, (2UL << 12) | (ltdefs->ivlan.lid << 8) | (ltdefs->ivlan.ltype_match << 4) | ltdefs->ivlan.ltype_mask); /* DSCP field in outer and tunneled IPv4 packets */ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP, (1UL << 12) | (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) | ltdefs->rx_oip4.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP, (1UL << 12) | (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) | ltdefs->rx_iip4.ltype_mask); /* DSCP field (traffic class) in outer and tunneled IPv6 packets */ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP, (1UL << 11) | (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) | ltdefs->rx_oip6.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP, (1UL << 11) | (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) | ltdefs->rx_iip6.ltype_mask); } static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw, int layer, int prof_idx) { struct nix_cn10k_aq_enq_req aq_req; int rc; memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14); aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; aq_req.op = NIX_AQ_INSTOP_INIT; /* Context is all zeros, submit to AQ */ rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, (struct nix_aq_enq_req *)&aq_req, NULL); if (rc) dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n", layer, prof_idx); return rc; } static int nix_setup_ipolicers(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) { struct rvu_hwinfo *hw = rvu->hw; struct nix_ipolicer *ipolicer; int err, layer, prof_idx; u64 cfg; cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); if (!(cfg & BIT_ULL(61))) { hw->cap.ipolicer = false; return 0; } hw->cap.ipolicer = true; nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS, sizeof(*ipolicer), GFP_KERNEL); if (!nix_hw->ipolicer) return -ENOMEM; cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST); for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { ipolicer = &nix_hw->ipolicer[layer]; switch (layer) { case BAND_PROF_LEAF_LAYER: ipolicer->band_prof.max = cfg & 0XFFFF; break; case BAND_PROF_MID_LAYER: ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF; break; case BAND_PROF_TOP_LAYER: ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF; break; } if (!ipolicer->band_prof.max) continue; err = rvu_alloc_bitmap(&ipolicer->band_prof); if (err) return err; ipolicer->pfvf_map = devm_kcalloc(rvu->dev, ipolicer->band_prof.max, sizeof(u16), GFP_KERNEL); if (!ipolicer->pfvf_map) return -ENOMEM; ipolicer->match_id = devm_kcalloc(rvu->dev, ipolicer->band_prof.max, sizeof(u16), GFP_KERNEL); if (!ipolicer->match_id) return -ENOMEM; for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) { /* Set AF as current owner for INIT ops to succeed */ ipolicer->pfvf_map[prof_idx] = 0x00; /* There is no enable bit in the profile context, * so no context disable. So let's INIT them here * so that PF/VF later on have to just do WRITE to * setup policer rates and config. 
*/ err = nix_init_policer_context(rvu, nix_hw, layer, prof_idx); if (err) return err; } /* Allocate memory for maintaining ref_counts for MID level * profiles, this will be needed for leaf layer profiles' * aggregation. */ if (layer != BAND_PROF_MID_LAYER) continue; ipolicer->ref_count = devm_kcalloc(rvu->dev, ipolicer->band_prof.max, sizeof(u16), GFP_KERNEL); if (!ipolicer->ref_count) return -ENOMEM; } /* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */ rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19); nix_config_rx_pkt_policer_precolor(rvu, blkaddr); return 0; } static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw) { struct nix_ipolicer *ipolicer; int layer; if (!rvu->hw->cap.ipolicer) return; for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { ipolicer = &nix_hw->ipolicer[layer]; if (!ipolicer->band_prof.max) continue; kfree(ipolicer->band_prof.bmap); } } static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, struct nix_hw *nix_hw, u16 pcifunc) { struct nix_ipolicer *ipolicer; int layer, hi_layer, prof_idx; /* Bits [15:14] in profile index represent layer */ layer = (req->qidx >> 14) & 0x03; prof_idx = req->qidx & 0x3FFF; ipolicer = &nix_hw->ipolicer[layer]; if (prof_idx >= ipolicer->band_prof.max) return -EINVAL; /* Check if the profile is allocated to the requesting PCIFUNC or not * with the exception of AF. AF is allowed to read and update contexts. */ if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc) return -EINVAL; /* If this profile is linked to higher layer profile then check * if that profile is also allocated to the requesting PCIFUNC * or not. */ if (!req->prof.hl_en) return 0; /* Leaf layer profile can link only to mid layer and * mid layer to top layer. */ if (layer == BAND_PROF_LEAF_LAYER) hi_layer = BAND_PROF_MID_LAYER; else if (layer == BAND_PROF_MID_LAYER) hi_layer = BAND_PROF_TOP_LAYER; else return -EINVAL; ipolicer = &nix_hw->ipolicer[hi_layer]; prof_idx = req->prof.band_prof_id; if (prof_idx >= ipolicer->band_prof.max || ipolicer->pfvf_map[prof_idx] != pcifunc) return -EINVAL; return 0; } int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu, struct nix_bandprof_alloc_req *req, struct nix_bandprof_alloc_rsp *rsp) { int blkaddr, layer, prof, idx, err; u16 pcifunc = req->hdr.pcifunc; struct nix_ipolicer *ipolicer; struct nix_hw *nix_hw; if (!rvu->hw->cap.ipolicer) return NIX_AF_ERR_IPOLICER_NOTSUPP; err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); if (err) return err; mutex_lock(&rvu->rsrc_lock); for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { if (layer == BAND_PROF_INVAL_LAYER) continue; if (!req->prof_count[layer]) continue; ipolicer = &nix_hw->ipolicer[layer]; for (idx = 0; idx < req->prof_count[layer]; idx++) { /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */ if (idx == MAX_BANDPROF_PER_PFFUNC) break; prof = rvu_alloc_rsrc(&ipolicer->band_prof); if (prof < 0) break; rsp->prof_count[layer]++; rsp->prof_idx[layer][idx] = prof; ipolicer->pfvf_map[prof] = pcifunc; } } mutex_unlock(&rvu->rsrc_lock); return 0; } static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc) { int blkaddr, layer, prof_idx, err; struct nix_ipolicer *ipolicer; struct nix_hw *nix_hw; if (!rvu->hw->cap.ipolicer) return NIX_AF_ERR_IPOLICER_NOTSUPP; err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); if (err) return err; mutex_lock(&rvu->rsrc_lock); /* Free all the profiles allocated to the PCIFUNC */ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { if (layer == BAND_PROF_INVAL_LAYER) continue; ipolicer 
= &nix_hw->ipolicer[layer]; for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) { if (ipolicer->pfvf_map[prof_idx] != pcifunc) continue; /* Clear ratelimit aggregation, if any */ if (layer == BAND_PROF_LEAF_LAYER && ipolicer->match_id[prof_idx]) nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx); ipolicer->pfvf_map[prof_idx] = 0x00; ipolicer->match_id[prof_idx] = 0; rvu_free_rsrc(&ipolicer->band_prof, prof_idx); } } mutex_unlock(&rvu->rsrc_lock); return 0; } int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu, struct nix_bandprof_free_req *req, struct msg_rsp *rsp) { int blkaddr, layer, prof_idx, idx, err; u16 pcifunc = req->hdr.pcifunc; struct nix_ipolicer *ipolicer; struct nix_hw *nix_hw; if (req->free_all) return nix_free_all_bandprof(rvu, pcifunc); if (!rvu->hw->cap.ipolicer) return NIX_AF_ERR_IPOLICER_NOTSUPP; err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); if (err) return err; mutex_lock(&rvu->rsrc_lock); /* Free the requested profile indices */ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { if (layer == BAND_PROF_INVAL_LAYER) continue; if (!req->prof_count[layer]) continue; ipolicer = &nix_hw->ipolicer[layer]; for (idx = 0; idx < req->prof_count[layer]; idx++) { prof_idx = req->prof_idx[layer][idx]; if (prof_idx >= ipolicer->band_prof.max || ipolicer->pfvf_map[prof_idx] != pcifunc) continue; /* Clear ratelimit aggregation, if any */ if (layer == BAND_PROF_LEAF_LAYER && ipolicer->match_id[prof_idx]) nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx); ipolicer->pfvf_map[prof_idx] = 0x00; ipolicer->match_id[prof_idx] = 0; rvu_free_rsrc(&ipolicer->band_prof, prof_idx); if (idx == MAX_BANDPROF_PER_PFFUNC) break; } } mutex_unlock(&rvu->rsrc_lock); return 0; } int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw, struct nix_cn10k_aq_enq_req *aq_req, struct nix_cn10k_aq_enq_rsp *aq_rsp, u16 pcifunc, u8 ctype, u32 qidx) { memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); aq_req->hdr.pcifunc = pcifunc; aq_req->ctype = ctype; aq_req->op = NIX_AQ_INSTOP_READ; aq_req->qidx = qidx; return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, (struct nix_aq_enq_req *)aq_req, (struct nix_aq_enq_rsp *)aq_rsp); } static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu, struct nix_hw *nix_hw, struct nix_cn10k_aq_enq_req *aq_req, struct nix_cn10k_aq_enq_rsp *aq_rsp, u32 leaf_prof, u16 mid_prof) { memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); aq_req->hdr.pcifunc = 0x00; aq_req->ctype = NIX_AQ_CTYPE_BANDPROF; aq_req->op = NIX_AQ_INSTOP_WRITE; aq_req->qidx = leaf_prof; aq_req->prof.band_prof_id = mid_prof; aq_req->prof_mask.band_prof_id = GENMASK(6, 0); aq_req->prof.hl_en = 1; aq_req->prof_mask.hl_en = 1; return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, (struct nix_aq_enq_req *)aq_req, (struct nix_aq_enq_rsp *)aq_rsp); } int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, u16 rq_idx, u16 match_id) { int leaf_prof, mid_prof, leaf_match; struct nix_cn10k_aq_enq_req aq_req; struct nix_cn10k_aq_enq_rsp aq_rsp; struct nix_ipolicer *ipolicer; struct nix_hw *nix_hw; int blkaddr, idx, rc; if (!rvu->hw->cap.ipolicer) return 0; rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); if (rc) return rc; /* Fetch the RQ's context to see if policing is enabled */ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc, NIX_AQ_CTYPE_RQ, rq_idx); if (rc) { dev_err(rvu->dev, "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n", __func__, rq_idx, pcifunc); return rc; } if (!aq_rsp.rq.policer_ena) return 0; /* Get the bandwidth profile ID mapped to this RQ */ 
leaf_prof = aq_rsp.rq.band_prof_id; ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER]; ipolicer->match_id[leaf_prof] = match_id; /* Check if any other leaf profile is marked with same match_id */ for (idx = 0; idx < ipolicer->band_prof.max; idx++) { if (idx == leaf_prof) continue; if (ipolicer->match_id[idx] != match_id) continue; leaf_match = idx; break; } if (idx == ipolicer->band_prof.max) return 0; /* Fetch the matching profile's context to check if it's already * mapped to a mid level profile. */ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, NIX_AQ_CTYPE_BANDPROF, leaf_match); if (rc) { dev_err(rvu->dev, "%s: Failed to fetch context of leaf profile %d\n", __func__, leaf_match); return rc; } ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; if (aq_rsp.prof.hl_en) { /* Get Mid layer prof index and map leaf_prof index * also such that flows that are being steered * to different RQs and marked with same match_id * are rate limited in a aggregate fashion */ mid_prof = aq_rsp.prof.band_prof_id; rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, &aq_req, &aq_rsp, leaf_prof, mid_prof); if (rc) { dev_err(rvu->dev, "%s: Failed to map leaf(%d) and mid(%d) profiles\n", __func__, leaf_prof, mid_prof); goto exit; } mutex_lock(&rvu->rsrc_lock); ipolicer->ref_count[mid_prof]++; mutex_unlock(&rvu->rsrc_lock); goto exit; } /* Allocate a mid layer profile and * map both 'leaf_prof' and 'leaf_match' profiles to it. */ mutex_lock(&rvu->rsrc_lock); mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof); if (mid_prof < 0) { dev_err(rvu->dev, "%s: Unable to allocate mid layer profile\n", __func__); mutex_unlock(&rvu->rsrc_lock); goto exit; } mutex_unlock(&rvu->rsrc_lock); ipolicer->pfvf_map[mid_prof] = 0x00; ipolicer->ref_count[mid_prof] = 0; /* Initialize mid layer profile same as 'leaf_prof' */ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, NIX_AQ_CTYPE_BANDPROF, leaf_prof); if (rc) { dev_err(rvu->dev, "%s: Failed to fetch context of leaf profile %d\n", __func__, leaf_prof); goto exit; } memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); aq_req.hdr.pcifunc = 0x00; aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14); aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; aq_req.op = NIX_AQ_INSTOP_WRITE; memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s)); memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s)); /* Clear higher layer enable bit in the mid profile, just in case */ aq_req.prof.hl_en = 0; aq_req.prof_mask.hl_en = 1; rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, (struct nix_aq_enq_req *)&aq_req, NULL); if (rc) { dev_err(rvu->dev, "%s: Failed to INIT context of mid layer profile %d\n", __func__, mid_prof); goto exit; } /* Map both leaf profiles to this mid layer profile */ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, &aq_req, &aq_rsp, leaf_prof, mid_prof); if (rc) { dev_err(rvu->dev, "%s: Failed to map leaf(%d) and mid(%d) profiles\n", __func__, leaf_prof, mid_prof); goto exit; } mutex_lock(&rvu->rsrc_lock); ipolicer->ref_count[mid_prof]++; mutex_unlock(&rvu->rsrc_lock); rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, &aq_req, &aq_rsp, leaf_match, mid_prof); if (rc) { dev_err(rvu->dev, "%s: Failed to map leaf(%d) and mid(%d) profiles\n", __func__, leaf_match, mid_prof); ipolicer->ref_count[mid_prof]--; goto exit; } mutex_lock(&rvu->rsrc_lock); ipolicer->ref_count[mid_prof]++; mutex_unlock(&rvu->rsrc_lock); exit: return rc; } /* Called with mutex rsrc_lock */ static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, u32 
leaf_prof) { struct nix_cn10k_aq_enq_req aq_req; struct nix_cn10k_aq_enq_rsp aq_rsp; struct nix_ipolicer *ipolicer; u16 mid_prof; int rc; mutex_unlock(&rvu->rsrc_lock); rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, NIX_AQ_CTYPE_BANDPROF, leaf_prof); mutex_lock(&rvu->rsrc_lock); if (rc) { dev_err(rvu->dev, "%s: Failed to fetch context of leaf profile %d\n", __func__, leaf_prof); return; } if (!aq_rsp.prof.hl_en) return; mid_prof = aq_rsp.prof.band_prof_id; ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; ipolicer->ref_count[mid_prof]--; /* If ref_count is zero, free mid layer profile */ if (!ipolicer->ref_count[mid_prof]) { ipolicer->pfvf_map[mid_prof] = 0x00; rvu_free_rsrc(&ipolicer->band_prof, mid_prof); } } int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req, struct nix_bandprof_get_hwinfo_rsp *rsp) { struct nix_ipolicer *ipolicer; int blkaddr, layer, err; struct nix_hw *nix_hw; u64 tu; if (!rvu->hw->cap.ipolicer) return NIX_AF_ERR_IPOLICER_NOTSUPP; err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); if (err) return err; /* Return number of bandwidth profiles free at each layer */ mutex_lock(&rvu->rsrc_lock); for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { if (layer == BAND_PROF_INVAL_LAYER) continue; ipolicer = &nix_hw->ipolicer[layer]; rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof); } mutex_unlock(&rvu->rsrc_lock); /* Set the policer timeunit in nanosec */ tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0); rsp->policer_timeunit = (tu + 1) * 100; return 0; }
linux-master
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
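The NIX/CPT register writes in the file above compose 64-bit values with GENMASK_ULL/FIELD_PREP/FIELD_GET (for example the CPT_INST_CREDIT_TH/BPID/CNT layout). The following is a minimal userspace sketch of that bitfield-packing technique; genmask_ull(), field_prep() and field_get() here are stand-ins written for illustration, not the kernel's <linux/bitfield.h> helpers, and the example only mirrors the field positions shown above.

/* Illustrative userspace sketch of FIELD_PREP/FIELD_GET style register
 * packing (e.g. for NIX_AF_RX_CPTX_CREDIT). Not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

/* Contiguous bit mask covering bits [lo, hi], like GENMASK_ULL(hi, lo). */
static uint64_t genmask_ull(unsigned int hi, unsigned int lo)
{
	return ((~0ULL) >> (63 - hi)) & ((~0ULL) << lo);
}

/* Shift a value into the position described by @mask, like FIELD_PREP(). */
static uint64_t field_prep(uint64_t mask, uint64_t val)
{
	return (val << __builtin_ctzll(mask)) & mask;
}

/* Extract the field described by @mask from @reg, like FIELD_GET(). */
static uint64_t field_get(uint64_t mask, uint64_t reg)
{
	return (reg & mask) >> __builtin_ctzll(mask);
}

int main(void)
{
	/* Field layout taken from the CPT_INST_CREDIT_* defines above. */
	const uint64_t cnt_mask  = genmask_ull(21, 0);
	const uint64_t bpid_mask = genmask_ull(30, 22);
	const uint64_t th_mask   = genmask_ull(53, 32);
	uint64_t reg = 0;

	reg |= field_prep(cnt_mask, 0x3FFFFF);	/* credit count */
	reg |= field_prep(bpid_mask, 7);	/* backpressure ID */
	reg |= field_prep(th_mask, 32);		/* credit threshold */

	printf("reg=0x%016llx cnt=0x%llx bpid=%llu th=%llu\n",
	       (unsigned long long)reg,
	       (unsigned long long)field_get(cnt_mask, reg),
	       (unsigned long long)field_get(bpid_mask, reg),
	       (unsigned long long)field_get(th_mask, reg));
	return 0;
}

Built with gcc, this prints the packed register image and the three fields recovered from it, which is exactly the round trip the driver performs when it writes and later reads the credit register.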
// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#define CREATE_TRACE_POINTS
#include "rvu_trace.h"

EXPORT_TRACEPOINT_SYMBOL(otx2_msg_alloc);
EXPORT_TRACEPOINT_SYMBOL(otx2_msg_interrupt);
EXPORT_TRACEPOINT_SYMBOL(otx2_msg_process);
linux-master
drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /* Copyright (c) 2020 Marvell International Ltd. All rights reserved */ #include <linux/kernel.h> #include <linux/list.h> #include "prestera.h" #include "prestera_acl.h" #include "prestera_flow.h" #include "prestera_flower.h" #include "prestera_matchall.h" #include "prestera_span.h" static LIST_HEAD(prestera_block_cb_list); static int prestera_flow_block_mall_cb(struct prestera_flow_block *block, struct tc_cls_matchall_offload *f) { switch (f->command) { case TC_CLSMATCHALL_REPLACE: return prestera_mall_replace(block, f); case TC_CLSMATCHALL_DESTROY: prestera_mall_destroy(block); return 0; default: return -EOPNOTSUPP; } } static int prestera_flow_block_flower_cb(struct prestera_flow_block *block, struct flow_cls_offload *f) { switch (f->command) { case FLOW_CLS_REPLACE: return prestera_flower_replace(block, f); case FLOW_CLS_DESTROY: prestera_flower_destroy(block, f); return 0; case FLOW_CLS_STATS: return prestera_flower_stats(block, f); case FLOW_CLS_TMPLT_CREATE: return prestera_flower_tmplt_create(block, f); case FLOW_CLS_TMPLT_DESTROY: prestera_flower_tmplt_destroy(block, f); return 0; default: return -EOPNOTSUPP; } } static int prestera_flow_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { struct prestera_flow_block *block = cb_priv; switch (type) { case TC_SETUP_CLSFLOWER: return prestera_flow_block_flower_cb(block, type_data); case TC_SETUP_CLSMATCHALL: return prestera_flow_block_mall_cb(block, type_data); default: return -EOPNOTSUPP; } } static void prestera_flow_block_destroy(void *cb_priv) { struct prestera_flow_block *block = cb_priv; prestera_flower_template_cleanup(block); WARN_ON(!list_empty(&block->template_list)); WARN_ON(!list_empty(&block->binding_list)); kfree(block); } static struct prestera_flow_block * prestera_flow_block_create(struct prestera_switch *sw, struct net *net, bool ingress) { struct prestera_flow_block *block; block = kzalloc(sizeof(*block), GFP_KERNEL); if (!block) return NULL; INIT_LIST_HEAD(&block->binding_list); INIT_LIST_HEAD(&block->template_list); block->net = net; block->sw = sw; block->mall.prio_min = UINT_MAX; block->mall.prio_max = 0; block->mall.bound = false; block->ingress = ingress; return block; } static void prestera_flow_block_release(void *cb_priv) { struct prestera_flow_block *block = cb_priv; prestera_flow_block_destroy(block); } static bool prestera_flow_block_is_bound(const struct prestera_flow_block *block) { return block->ruleset_zero; } static struct prestera_flow_block_binding * prestera_flow_block_lookup(struct prestera_flow_block *block, struct prestera_port *port) { struct prestera_flow_block_binding *binding; list_for_each_entry(binding, &block->binding_list, list) if (binding->port == port) return binding; return NULL; } static int prestera_flow_block_bind(struct prestera_flow_block *block, struct prestera_port *port) { struct prestera_flow_block_binding *binding; int err; binding = kzalloc(sizeof(*binding), GFP_KERNEL); if (!binding) return -ENOMEM; binding->span_id = PRESTERA_SPAN_INVALID_ID; binding->port = port; if (prestera_flow_block_is_bound(block)) { err = prestera_acl_ruleset_bind(block->ruleset_zero, port); if (err) goto err_ruleset_bind; } list_add(&binding->list, &block->binding_list); return 0; err_ruleset_bind: kfree(binding); return err; } static int prestera_flow_block_unbind(struct prestera_flow_block *block, struct prestera_port *port) { struct prestera_flow_block_binding *binding; binding = prestera_flow_block_lookup(block, port); if 
(!binding) return -ENOENT; list_del(&binding->list); if (prestera_flow_block_is_bound(block)) prestera_acl_ruleset_unbind(block->ruleset_zero, port); kfree(binding); return 0; } static struct prestera_flow_block * prestera_flow_block_get(struct prestera_switch *sw, struct flow_block_offload *f, bool *register_block, bool ingress) { struct prestera_flow_block *block; struct flow_block_cb *block_cb; block_cb = flow_block_cb_lookup(f->block, prestera_flow_block_cb, sw); if (!block_cb) { block = prestera_flow_block_create(sw, f->net, ingress); if (!block) return ERR_PTR(-ENOMEM); block_cb = flow_block_cb_alloc(prestera_flow_block_cb, sw, block, prestera_flow_block_release); if (IS_ERR(block_cb)) { prestera_flow_block_destroy(block); return ERR_CAST(block_cb); } block->block_cb = block_cb; *register_block = true; } else { block = flow_block_cb_priv(block_cb); *register_block = false; } flow_block_cb_incref(block_cb); return block; } static void prestera_flow_block_put(struct prestera_flow_block *block) { struct flow_block_cb *block_cb = block->block_cb; if (flow_block_cb_decref(block_cb)) return; flow_block_cb_free(block_cb); prestera_flow_block_destroy(block); } static int prestera_setup_flow_block_bind(struct prestera_port *port, struct flow_block_offload *f, bool ingress) { struct prestera_switch *sw = port->sw; struct prestera_flow_block *block; struct flow_block_cb *block_cb; bool register_block; int err; block = prestera_flow_block_get(sw, f, &register_block, ingress); if (IS_ERR(block)) return PTR_ERR(block); block_cb = block->block_cb; err = prestera_flow_block_bind(block, port); if (err) goto err_block_bind; if (register_block) { flow_block_cb_add(block_cb, f); list_add_tail(&block_cb->driver_list, &prestera_block_cb_list); } if (ingress) port->ingress_flow_block = block; else port->egress_flow_block = block; return 0; err_block_bind: prestera_flow_block_put(block); return err; } static void prestera_setup_flow_block_unbind(struct prestera_port *port, struct flow_block_offload *f, bool ingress) { struct prestera_switch *sw = port->sw; struct prestera_flow_block *block; struct flow_block_cb *block_cb; int err; block_cb = flow_block_cb_lookup(f->block, prestera_flow_block_cb, sw); if (!block_cb) return; block = flow_block_cb_priv(block_cb); prestera_mall_destroy(block); err = prestera_flow_block_unbind(block, port); if (err) goto error; if (!flow_block_cb_decref(block_cb)) { flow_block_cb_remove(block_cb, f); list_del(&block_cb->driver_list); } error: if (ingress) port->ingress_flow_block = NULL; else port->egress_flow_block = NULL; } static int prestera_setup_flow_block_clsact(struct prestera_port *port, struct flow_block_offload *f, bool ingress) { f->driver_block_list = &prestera_block_cb_list; switch (f->command) { case FLOW_BLOCK_BIND: return prestera_setup_flow_block_bind(port, f, ingress); case FLOW_BLOCK_UNBIND: prestera_setup_flow_block_unbind(port, f, ingress); return 0; default: return -EOPNOTSUPP; } } int prestera_flow_block_setup(struct prestera_port *port, struct flow_block_offload *f) { switch (f->binder_type) { case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS: return prestera_setup_flow_block_clsact(port, f, true); case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS: return prestera_setup_flow_block_clsact(port, f, false); default: return -EOPNOTSUPP; } }
linux-master
drivers/net/ethernet/marvell/prestera/prestera_flow.c
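prestera_flow_block_get()/prestera_flow_block_put() in the file above share a single flow block between several binders by reference counting the flow_block_cb: the first binder allocates the block, later binders only bump the count, and the block is destroyed when the last reference drops. Below is a stripped-down userspace analogue of that get/put pattern; the names (demo_block, demo_block_get/put) are invented for illustration and are not part of the prestera driver or the flow offload API.

/* Userspace sketch of the refcounted get/put pattern; not driver code. */
#include <stdio.h>
#include <stdlib.h>

struct demo_block {
	int refcnt;
	int id;
};

static struct demo_block *cached_block;	/* stands in for flow_block_cb_lookup() */

static struct demo_block *demo_block_get(int id)
{
	if (!cached_block) {
		cached_block = calloc(1, sizeof(*cached_block));
		if (!cached_block)
			return NULL;
		cached_block->id = id;
	}
	cached_block->refcnt++;		/* like flow_block_cb_incref() */
	return cached_block;
}

static void demo_block_put(struct demo_block *block)
{
	if (--block->refcnt)		/* like flow_block_cb_decref() */
		return;
	cached_block = NULL;
	free(block);			/* last user: destroy the block */
}

int main(void)
{
	struct demo_block *a = demo_block_get(1);	/* allocates */
	struct demo_block *b = demo_block_get(1);	/* reuses, refcnt = 2 */

	printf("same block: %s, refcnt=%d\n", a == b ? "yes" : "no", a->refcnt);
	demo_block_put(b);	/* refcnt back to 1 */
	demo_block_put(a);	/* freed here */
	return 0;
}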
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */ #include <linux/bitfield.h> #include <linux/dmapool.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> #include <linux/platform_device.h> #include "prestera_dsa.h" #include "prestera.h" #include "prestera_hw.h" #include "prestera_rxtx.h" #include "prestera_devlink.h" #define PRESTERA_SDMA_WAIT_MUL 10 struct prestera_sdma_desc { __le32 word1; __le32 word2; __le32 buff; __le32 next; } __packed __aligned(16); #define PRESTERA_SDMA_BUFF_SIZE_MAX 1544 #define PRESTERA_SDMA_RX_DESC_PKT_LEN(desc) \ ((le32_to_cpu((desc)->word2) >> 16) & GENMASK(13, 0)) #define PRESTERA_SDMA_RX_DESC_OWNER(desc) \ ((le32_to_cpu((desc)->word1) & BIT(31)) >> 31) #define PRESTERA_SDMA_RX_DESC_IS_RCVD(desc) \ (PRESTERA_SDMA_RX_DESC_OWNER(desc) == PRESTERA_SDMA_RX_DESC_CPU_OWN) #define PRESTERA_SDMA_RX_DESC_CPU_OWN 0 #define PRESTERA_SDMA_RX_DESC_DMA_OWN 1 #define PRESTERA_SDMA_RX_QUEUE_NUM 8 #define PRESTERA_SDMA_RX_DESC_PER_Q 1000 #define PRESTERA_SDMA_TX_DESC_PER_Q 1000 #define PRESTERA_SDMA_TX_MAX_BURST 64 #define PRESTERA_SDMA_TX_DESC_OWNER(desc) \ ((le32_to_cpu((desc)->word1) & BIT(31)) >> 31) #define PRESTERA_SDMA_TX_DESC_CPU_OWN 0 #define PRESTERA_SDMA_TX_DESC_DMA_OWN 1U #define PRESTERA_SDMA_TX_DESC_IS_SENT(desc) \ (PRESTERA_SDMA_TX_DESC_OWNER(desc) == PRESTERA_SDMA_TX_DESC_CPU_OWN) #define PRESTERA_SDMA_TX_DESC_LAST BIT(20) #define PRESTERA_SDMA_TX_DESC_FIRST BIT(21) #define PRESTERA_SDMA_TX_DESC_CALC_CRC BIT(12) #define PRESTERA_SDMA_TX_DESC_SINGLE \ (PRESTERA_SDMA_TX_DESC_FIRST | PRESTERA_SDMA_TX_DESC_LAST) #define PRESTERA_SDMA_TX_DESC_INIT \ (PRESTERA_SDMA_TX_DESC_SINGLE | PRESTERA_SDMA_TX_DESC_CALC_CRC) #define PRESTERA_SDMA_RX_INTR_MASK_REG 0x2814 #define PRESTERA_SDMA_RX_QUEUE_STATUS_REG 0x2680 #define PRESTERA_SDMA_RX_QUEUE_DESC_REG(n) (0x260C + (n) * 16) #define PRESTERA_SDMA_TX_QUEUE_DESC_REG 0x26C0 #define PRESTERA_SDMA_TX_QUEUE_START_REG 0x2868 struct prestera_sdma_buf { struct prestera_sdma_desc *desc; dma_addr_t desc_dma; struct sk_buff *skb; dma_addr_t buf_dma; bool is_used; }; struct prestera_rx_ring { struct prestera_sdma_buf *bufs; int next_rx; }; struct prestera_tx_ring { struct prestera_sdma_buf *bufs; int next_tx; int max_burst; int burst; }; struct prestera_sdma { struct prestera_rx_ring rx_ring[PRESTERA_SDMA_RX_QUEUE_NUM]; struct prestera_tx_ring tx_ring; struct prestera_switch *sw; struct dma_pool *desc_pool; struct work_struct tx_work; struct napi_struct rx_napi; struct net_device napi_dev; u32 map_addr; u64 dma_mask; /* protect SDMA with concurrent access from multiple CPUs */ spinlock_t tx_lock; }; struct prestera_rxtx { struct prestera_sdma sdma; }; static int prestera_sdma_buf_init(struct prestera_sdma *sdma, struct prestera_sdma_buf *buf) { struct prestera_sdma_desc *desc; dma_addr_t dma; desc = dma_pool_alloc(sdma->desc_pool, GFP_DMA | GFP_KERNEL, &dma); if (!desc) return -ENOMEM; buf->buf_dma = DMA_MAPPING_ERROR; buf->desc_dma = dma; buf->desc = desc; buf->skb = NULL; return 0; } static u32 prestera_sdma_map(struct prestera_sdma *sdma, dma_addr_t pa) { return sdma->map_addr + pa; } static void prestera_sdma_rx_desc_init(struct prestera_sdma *sdma, struct prestera_sdma_desc *desc, dma_addr_t buf) { u32 word = le32_to_cpu(desc->word2); u32p_replace_bits(&word, PRESTERA_SDMA_BUFF_SIZE_MAX, GENMASK(15, 0)); desc->word2 = cpu_to_le32(word); desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf)); /* make sure buffer is set before reset the 
descriptor */ wmb(); desc->word1 = cpu_to_le32(0xA0000000); } static void prestera_sdma_rx_desc_set_next(struct prestera_sdma *sdma, struct prestera_sdma_desc *desc, dma_addr_t next) { desc->next = cpu_to_le32(prestera_sdma_map(sdma, next)); } static int prestera_sdma_rx_skb_alloc(struct prestera_sdma *sdma, struct prestera_sdma_buf *buf) { struct device *dev = sdma->sw->dev->dev; struct sk_buff *skb; dma_addr_t dma; skb = alloc_skb(PRESTERA_SDMA_BUFF_SIZE_MAX, GFP_DMA | GFP_ATOMIC); if (!skb) return -ENOMEM; dma = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE); if (dma_mapping_error(dev, dma)) goto err_dma_map; if (buf->skb) dma_unmap_single(dev, buf->buf_dma, buf->skb->len, DMA_FROM_DEVICE); buf->buf_dma = dma; buf->skb = skb; return 0; err_dma_map: kfree_skb(skb); return -ENOMEM; } static struct sk_buff *prestera_sdma_rx_skb_get(struct prestera_sdma *sdma, struct prestera_sdma_buf *buf) { dma_addr_t buf_dma = buf->buf_dma; struct sk_buff *skb = buf->skb; u32 len = skb->len; int err; err = prestera_sdma_rx_skb_alloc(sdma, buf); if (err) { buf->buf_dma = buf_dma; buf->skb = skb; skb = alloc_skb(skb->len, GFP_ATOMIC); if (skb) { skb_put(skb, len); skb_copy_from_linear_data(buf->skb, skb->data, len); } } prestera_sdma_rx_desc_init(sdma, buf->desc, buf->buf_dma); return skb; } static int prestera_rxtx_process_skb(struct prestera_sdma *sdma, struct sk_buff *skb) { struct prestera_port *port; struct prestera_dsa dsa; u32 hw_port, dev_id; u8 cpu_code; int err; skb_pull(skb, ETH_HLEN); /* ethertype field is part of the dsa header */ err = prestera_dsa_parse(&dsa, skb->data - ETH_TLEN); if (err) return err; dev_id = dsa.hw_dev_num; hw_port = dsa.port_num; port = prestera_port_find_by_hwid(sdma->sw, dev_id, hw_port); if (unlikely(!port)) { dev_warn_ratelimited(prestera_dev(sdma->sw), "received pkt for non-existent port(%u, %u)\n", dev_id, hw_port); return -ENOENT; } if (unlikely(!pskb_may_pull(skb, PRESTERA_DSA_HLEN))) return -EINVAL; /* remove DSA tag and update checksum */ skb_pull_rcsum(skb, PRESTERA_DSA_HLEN); memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - PRESTERA_DSA_HLEN, ETH_ALEN * 2); skb_push(skb, ETH_HLEN); skb->protocol = eth_type_trans(skb, port->dev); if (dsa.vlan.is_tagged) { u16 tci = dsa.vlan.vid & VLAN_VID_MASK; tci |= dsa.vlan.vpt << VLAN_PRIO_SHIFT; if (dsa.vlan.cfi_bit) tci |= VLAN_CFI_MASK; __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci); } cpu_code = dsa.cpu_code; prestera_devlink_trap_report(port, skb, cpu_code); return 0; } static int prestera_sdma_next_rx_buf_idx(int buf_idx) { return (buf_idx + 1) % PRESTERA_SDMA_RX_DESC_PER_Q; } static int prestera_sdma_rx_poll(struct napi_struct *napi, int budget) { int qnum = PRESTERA_SDMA_RX_QUEUE_NUM; unsigned int rxq_done_map = 0; struct prestera_sdma *sdma; struct list_head rx_list; unsigned int qmask; int pkts_done = 0; int q; qnum = PRESTERA_SDMA_RX_QUEUE_NUM; qmask = GENMASK(qnum - 1, 0); INIT_LIST_HEAD(&rx_list); sdma = container_of(napi, struct prestera_sdma, rx_napi); while (pkts_done < budget && rxq_done_map != qmask) { for (q = 0; q < qnum && pkts_done < budget; q++) { struct prestera_rx_ring *ring = &sdma->rx_ring[q]; struct prestera_sdma_desc *desc; struct prestera_sdma_buf *buf; int buf_idx = ring->next_rx; struct sk_buff *skb; buf = &ring->bufs[buf_idx]; desc = buf->desc; if (PRESTERA_SDMA_RX_DESC_IS_RCVD(desc)) { rxq_done_map &= ~BIT(q); } else { rxq_done_map |= BIT(q); continue; } pkts_done++; __skb_trim(buf->skb, PRESTERA_SDMA_RX_DESC_PKT_LEN(desc)); skb = prestera_sdma_rx_skb_get(sdma, buf); 
if (!skb) goto rx_next_buf; if (unlikely(prestera_rxtx_process_skb(sdma, skb))) goto rx_next_buf; list_add_tail(&skb->list, &rx_list); rx_next_buf: ring->next_rx = prestera_sdma_next_rx_buf_idx(buf_idx); } } if (pkts_done < budget && napi_complete_done(napi, pkts_done)) prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG, GENMASK(9, 2)); netif_receive_skb_list(&rx_list); return pkts_done; } static void prestera_sdma_rx_fini(struct prestera_sdma *sdma) { int qnum = PRESTERA_SDMA_RX_QUEUE_NUM; int q, b; /* disable all rx queues */ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG, GENMASK(15, 8)); for (q = 0; q < qnum; q++) { struct prestera_rx_ring *ring = &sdma->rx_ring[q]; if (!ring->bufs) break; for (b = 0; b < PRESTERA_SDMA_RX_DESC_PER_Q; b++) { struct prestera_sdma_buf *buf = &ring->bufs[b]; if (buf->desc_dma) dma_pool_free(sdma->desc_pool, buf->desc, buf->desc_dma); if (!buf->skb) continue; if (buf->buf_dma != DMA_MAPPING_ERROR) dma_unmap_single(sdma->sw->dev->dev, buf->buf_dma, buf->skb->len, DMA_FROM_DEVICE); kfree_skb(buf->skb); } } } static int prestera_sdma_rx_init(struct prestera_sdma *sdma) { int bnum = PRESTERA_SDMA_RX_DESC_PER_Q; int qnum = PRESTERA_SDMA_RX_QUEUE_NUM; int err; int q; /* disable all rx queues */ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG, GENMASK(15, 8)); for (q = 0; q < qnum; q++) { struct prestera_sdma_buf *head, *tail, *next, *prev; struct prestera_rx_ring *ring = &sdma->rx_ring[q]; ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL); if (!ring->bufs) return -ENOMEM; ring->next_rx = 0; tail = &ring->bufs[bnum - 1]; head = &ring->bufs[0]; next = head; prev = next; do { err = prestera_sdma_buf_init(sdma, next); if (err) return err; err = prestera_sdma_rx_skb_alloc(sdma, next); if (err) return err; prestera_sdma_rx_desc_init(sdma, next->desc, next->buf_dma); prestera_sdma_rx_desc_set_next(sdma, prev->desc, next->desc_dma); prev = next; next++; } while (prev != tail); /* join tail with head to make a circular list */ prestera_sdma_rx_desc_set_next(sdma, tail->desc, head->desc_dma); prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_DESC_REG(q), prestera_sdma_map(sdma, head->desc_dma)); } /* make sure all rx descs are filled before enabling all rx queues */ wmb(); prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG, GENMASK(7, 0)); return 0; } static void prestera_sdma_tx_desc_init(struct prestera_sdma *sdma, struct prestera_sdma_desc *desc) { desc->word1 = cpu_to_le32(PRESTERA_SDMA_TX_DESC_INIT); desc->word2 = 0; } static void prestera_sdma_tx_desc_set_next(struct prestera_sdma *sdma, struct prestera_sdma_desc *desc, dma_addr_t next) { desc->next = cpu_to_le32(prestera_sdma_map(sdma, next)); } static void prestera_sdma_tx_desc_set_buf(struct prestera_sdma *sdma, struct prestera_sdma_desc *desc, dma_addr_t buf, size_t len) { u32 word = le32_to_cpu(desc->word2); u32p_replace_bits(&word, len + ETH_FCS_LEN, GENMASK(30, 16)); desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf)); desc->word2 = cpu_to_le32(word); } static void prestera_sdma_tx_desc_xmit(struct prestera_sdma_desc *desc) { u32 word = le32_to_cpu(desc->word1); word |= PRESTERA_SDMA_TX_DESC_DMA_OWN << 31; /* make sure everything is written before enable xmit */ wmb(); desc->word1 = cpu_to_le32(word); } static int prestera_sdma_tx_buf_map(struct prestera_sdma *sdma, struct prestera_sdma_buf *buf, struct sk_buff *skb) { struct device *dma_dev = sdma->sw->dev->dev; dma_addr_t dma; dma = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE); if 
(dma_mapping_error(dma_dev, dma)) return -ENOMEM; buf->buf_dma = dma; buf->skb = skb; return 0; } static void prestera_sdma_tx_buf_unmap(struct prestera_sdma *sdma, struct prestera_sdma_buf *buf) { struct device *dma_dev = sdma->sw->dev->dev; dma_unmap_single(dma_dev, buf->buf_dma, buf->skb->len, DMA_TO_DEVICE); } static void prestera_sdma_tx_recycle_work_fn(struct work_struct *work) { int bnum = PRESTERA_SDMA_TX_DESC_PER_Q; struct prestera_tx_ring *tx_ring; struct prestera_sdma *sdma; int b; sdma = container_of(work, struct prestera_sdma, tx_work); tx_ring = &sdma->tx_ring; for (b = 0; b < bnum; b++) { struct prestera_sdma_buf *buf = &tx_ring->bufs[b]; if (!buf->is_used) continue; if (!PRESTERA_SDMA_TX_DESC_IS_SENT(buf->desc)) continue; prestera_sdma_tx_buf_unmap(sdma, buf); dev_consume_skb_any(buf->skb); buf->skb = NULL; /* make sure everything is cleaned up */ wmb(); buf->is_used = false; } } static int prestera_sdma_tx_init(struct prestera_sdma *sdma) { struct prestera_sdma_buf *head, *tail, *next, *prev; struct prestera_tx_ring *tx_ring = &sdma->tx_ring; int bnum = PRESTERA_SDMA_TX_DESC_PER_Q; int err; INIT_WORK(&sdma->tx_work, prestera_sdma_tx_recycle_work_fn); spin_lock_init(&sdma->tx_lock); tx_ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL); if (!tx_ring->bufs) return -ENOMEM; tail = &tx_ring->bufs[bnum - 1]; head = &tx_ring->bufs[0]; next = head; prev = next; tx_ring->max_burst = PRESTERA_SDMA_TX_MAX_BURST; tx_ring->burst = tx_ring->max_burst; tx_ring->next_tx = 0; do { err = prestera_sdma_buf_init(sdma, next); if (err) return err; next->is_used = false; prestera_sdma_tx_desc_init(sdma, next->desc); prestera_sdma_tx_desc_set_next(sdma, prev->desc, next->desc_dma); prev = next; next++; } while (prev != tail); /* join tail with head to make a circular list */ prestera_sdma_tx_desc_set_next(sdma, tail->desc, head->desc_dma); /* make sure descriptors are written */ wmb(); prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_DESC_REG, prestera_sdma_map(sdma, head->desc_dma)); return 0; } static void prestera_sdma_tx_fini(struct prestera_sdma *sdma) { struct prestera_tx_ring *ring = &sdma->tx_ring; int bnum = PRESTERA_SDMA_TX_DESC_PER_Q; int b; cancel_work_sync(&sdma->tx_work); if (!ring->bufs) return; for (b = 0; b < bnum; b++) { struct prestera_sdma_buf *buf = &ring->bufs[b]; if (buf->desc) dma_pool_free(sdma->desc_pool, buf->desc, buf->desc_dma); if (!buf->skb) continue; dma_unmap_single(sdma->sw->dev->dev, buf->buf_dma, buf->skb->len, DMA_TO_DEVICE); dev_consume_skb_any(buf->skb); } } static void prestera_rxtx_handle_event(struct prestera_switch *sw, struct prestera_event *evt, void *arg) { struct prestera_sdma *sdma = arg; if (evt->id != PRESTERA_RXTX_EVENT_RCV_PKT) return; prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG, 0); napi_schedule(&sdma->rx_napi); } static int prestera_sdma_switch_init(struct prestera_switch *sw) { struct prestera_sdma *sdma = &sw->rxtx->sdma; struct device *dev = sw->dev->dev; struct prestera_rxtx_params p; int err; p.use_sdma = true; err = prestera_hw_rxtx_init(sw, &p); if (err) { dev_err(dev, "failed to init rxtx by hw\n"); return err; } sdma->dma_mask = dma_get_mask(dev); sdma->map_addr = p.map_addr; sdma->sw = sw; sdma->desc_pool = dma_pool_create("desc_pool", dev, sizeof(struct prestera_sdma_desc), 16, 0); if (!sdma->desc_pool) return -ENOMEM; err = prestera_sdma_rx_init(sdma); if (err) { dev_err(dev, "failed to init rx ring\n"); goto err_rx_init; } err = prestera_sdma_tx_init(sdma); if (err) { dev_err(dev, "failed to init tx ring\n"); 
goto err_tx_init; } err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_RXTX, prestera_rxtx_handle_event, sdma); if (err) goto err_evt_register; init_dummy_netdev(&sdma->napi_dev); netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll); napi_enable(&sdma->rx_napi); return 0; err_evt_register: err_tx_init: prestera_sdma_tx_fini(sdma); err_rx_init: prestera_sdma_rx_fini(sdma); dma_pool_destroy(sdma->desc_pool); return err; } static void prestera_sdma_switch_fini(struct prestera_switch *sw) { struct prestera_sdma *sdma = &sw->rxtx->sdma; napi_disable(&sdma->rx_napi); netif_napi_del(&sdma->rx_napi); prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX, prestera_rxtx_handle_event); prestera_sdma_tx_fini(sdma); prestera_sdma_rx_fini(sdma); dma_pool_destroy(sdma->desc_pool); } static bool prestera_sdma_is_ready(struct prestera_sdma *sdma) { return !(prestera_read(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG) & 1); } static int prestera_sdma_tx_wait(struct prestera_sdma *sdma, struct prestera_tx_ring *tx_ring) { int tx_wait_num = PRESTERA_SDMA_WAIT_MUL * tx_ring->max_burst; do { if (prestera_sdma_is_ready(sdma)) return 0; udelay(1); } while (--tx_wait_num); return -EBUSY; } static void prestera_sdma_tx_start(struct prestera_sdma *sdma) { prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG, 1); schedule_work(&sdma->tx_work); } static netdev_tx_t prestera_sdma_xmit(struct prestera_sdma *sdma, struct sk_buff *skb) { struct device *dma_dev = sdma->sw->dev->dev; struct net_device *dev = skb->dev; struct prestera_tx_ring *tx_ring; struct prestera_sdma_buf *buf; int err; spin_lock(&sdma->tx_lock); tx_ring = &sdma->tx_ring; buf = &tx_ring->bufs[tx_ring->next_tx]; if (buf->is_used) { schedule_work(&sdma->tx_work); goto drop_skb; } if (unlikely(eth_skb_pad(skb))) goto drop_skb_nofree; err = prestera_sdma_tx_buf_map(sdma, buf, skb); if (err) goto drop_skb; prestera_sdma_tx_desc_set_buf(sdma, buf->desc, buf->buf_dma, skb->len); dma_sync_single_for_device(dma_dev, buf->buf_dma, skb->len, DMA_TO_DEVICE); if (tx_ring->burst) { tx_ring->burst--; } else { tx_ring->burst = tx_ring->max_burst; err = prestera_sdma_tx_wait(sdma, tx_ring); if (err) goto drop_skb_unmap; } tx_ring->next_tx = (tx_ring->next_tx + 1) % PRESTERA_SDMA_TX_DESC_PER_Q; prestera_sdma_tx_desc_xmit(buf->desc); buf->is_used = true; prestera_sdma_tx_start(sdma); goto tx_done; drop_skb_unmap: prestera_sdma_tx_buf_unmap(sdma, buf); drop_skb: dev_consume_skb_any(skb); drop_skb_nofree: dev->stats.tx_dropped++; tx_done: spin_unlock(&sdma->tx_lock); return NETDEV_TX_OK; } int prestera_rxtx_switch_init(struct prestera_switch *sw) { struct prestera_rxtx *rxtx; int err; rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL); if (!rxtx) return -ENOMEM; sw->rxtx = rxtx; err = prestera_sdma_switch_init(sw); if (err) kfree(rxtx); return err; } void prestera_rxtx_switch_fini(struct prestera_switch *sw) { prestera_sdma_switch_fini(sw); kfree(sw->rxtx); } int prestera_rxtx_port_init(struct prestera_port *port) { port->dev->needed_headroom = PRESTERA_DSA_HLEN; return 0; } netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb) { struct prestera_dsa dsa; dsa.hw_dev_num = port->dev_id; dsa.port_num = port->hw_id; if (skb_cow_head(skb, PRESTERA_DSA_HLEN) < 0) return NET_XMIT_DROP; skb_push(skb, PRESTERA_DSA_HLEN); memmove(skb->data, skb->data + PRESTERA_DSA_HLEN, 2 * ETH_ALEN); if (prestera_dsa_build(&dsa, skb->data + 2 * ETH_ALEN) != 0) return NET_XMIT_DROP; return prestera_sdma_xmit(&port->sw->rxtx->sdma, skb); }
linux-master
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
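The SDMA code in the file above hands ring descriptors back and forth with an ownership flag (the PRESTERA_SDMA_*_DESC_OWNER bit) and wrapping next_rx/next_tx indices: software only touches a slot while it owns it, flips the owner bit to pass it to hardware, and the recycle work reclaims sent slots. The sketch below models that handshake in plain userspace C with invented names; real descriptors also carry DMA addresses and need the wmb() barriers shown in the driver.

/* Userspace sketch of a descriptor-ownership ring; not driver code. */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 4

enum owner { OWNER_CPU, OWNER_DMA };

struct demo_desc {
	enum owner owner;
	int payload;
};

static struct demo_desc ring[RING_SIZE];
static int next_tx;

static bool demo_xmit(int payload)
{
	struct demo_desc *desc = &ring[next_tx];

	if (desc->owner != OWNER_CPU)
		return false;			/* slot still held by "hardware" */

	desc->payload = payload;
	desc->owner = OWNER_DMA;		/* hand over, like setting DMA_OWN */
	next_tx = (next_tx + 1) % RING_SIZE;	/* wrap, like next_tx handling */
	return true;
}

static void demo_reclaim(void)
{
	/* Stand-in for the tx recycle work: "hardware" is done, CPU reclaims. */
	for (int i = 0; i < RING_SIZE; i++)
		if (ring[i].owner == OWNER_DMA)
			ring[i].owner = OWNER_CPU;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("xmit %d: %s\n", i, demo_xmit(i) ? "queued" : "ring full");
	demo_reclaim();
	printf("after reclaim: %s\n", demo_xmit(42) ? "queued" : "ring full");
	return 0;
}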
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /* Copyright (c) 2019-2022 Marvell International Ltd. All rights reserved */ #include <linux/kernel.h> #include <linux/list.h> #include "prestera.h" #include "prestera_hw.h" #include "prestera_flow.h" #include "prestera_flower.h" #include "prestera_matchall.h" #include "prestera_span.h" static int prestera_mall_prio_check(struct prestera_flow_block *block, struct tc_cls_matchall_offload *f) { u32 flower_prio_min; u32 flower_prio_max; int err; err = prestera_flower_prio_get(block, f->common.chain_index, &flower_prio_min, &flower_prio_max); if (err == -ENOENT) /* No flower filters installed on this chain. */ return 0; if (err) { NL_SET_ERR_MSG(f->common.extack, "Failed to get flower priorities"); return err; } if (f->common.prio <= flower_prio_max && !block->ingress) { NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules"); return -EOPNOTSUPP; } if (f->common.prio >= flower_prio_min && block->ingress) { NL_SET_ERR_MSG(f->common.extack, "Failed to add behind of existing flower rules"); return -EOPNOTSUPP; } return 0; } int prestera_mall_prio_get(struct prestera_flow_block *block, u32 *prio_min, u32 *prio_max) { if (!block->mall.bound) return -ENOENT; *prio_min = block->mall.prio_min; *prio_max = block->mall.prio_max; return 0; } static void prestera_mall_prio_update(struct prestera_flow_block *block, struct tc_cls_matchall_offload *f) { block->mall.prio_min = min(block->mall.prio_min, f->common.prio); block->mall.prio_max = max(block->mall.prio_max, f->common.prio); } int prestera_mall_replace(struct prestera_flow_block *block, struct tc_cls_matchall_offload *f) { struct prestera_flow_block_binding *binding; __be16 protocol = f->common.protocol; struct flow_action_entry *act; struct prestera_port *port; int err; if (!flow_offload_has_one_action(&f->rule->action)) { NL_SET_ERR_MSG(f->common.extack, "Only singular actions are supported"); return -EOPNOTSUPP; } act = &f->rule->action.entries[0]; if (!prestera_netdev_check(act->dev)) { NL_SET_ERR_MSG(f->common.extack, "Only Marvell Prestera port is supported"); return -EINVAL; } if (!tc_cls_can_offload_and_chain0(act->dev, &f->common)) return -EOPNOTSUPP; if (act->id != FLOW_ACTION_MIRRED) return -EOPNOTSUPP; if (protocol != htons(ETH_P_ALL)) return -EOPNOTSUPP; err = prestera_mall_prio_check(block, f); if (err) return err; port = netdev_priv(act->dev); list_for_each_entry(binding, &block->binding_list, list) { err = prestera_span_rule_add(binding, port, block->ingress); if (err == -EEXIST) return err; if (err) goto rollback; } prestera_mall_prio_update(block, f); block->mall.bound = true; return 0; rollback: list_for_each_entry_continue_reverse(binding, &block->binding_list, list) prestera_span_rule_del(binding, block->ingress); return err; } void prestera_mall_destroy(struct prestera_flow_block *block) { struct prestera_flow_block_binding *binding; list_for_each_entry(binding, &block->binding_list, list) prestera_span_rule_del(binding, block->ingress); block->mall.prio_min = UINT_MAX; block->mall.prio_max = 0; block->mall.bound = false; }
linux-master
drivers/net/ethernet/marvell/prestera/prestera_matchall.c
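prestera_mall_prio_check() and prestera_mall_prio_update() in the file above keep a [prio_min, prio_max] window so that matchall and flower rules cannot be installed on the wrong side of each other. The following userspace sketch, with invented names, shows the same window bookkeeping and the ingress-side conflict test (a new rule of the other type must sit strictly below the existing window).

/* Userspace sketch of priority-window bookkeeping; not driver code. */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_prio_window {
	unsigned int min;
	unsigned int max;
	bool bound;
};

static void demo_window_init(struct demo_prio_window *w)
{
	w->min = UINT_MAX;
	w->max = 0;
	w->bound = false;
}

static void demo_window_add(struct demo_prio_window *w, unsigned int prio)
{
	w->min = prio < w->min ? prio : w->min;
	w->max = prio > w->max ? prio : w->max;
	w->bound = true;
}

/* On ingress, the new rule must have a priority strictly below the window. */
static bool demo_ingress_conflict(const struct demo_prio_window *w,
				  unsigned int new_prio)
{
	return w->bound && new_prio >= w->min;
}

int main(void)
{
	struct demo_prio_window mall;

	demo_window_init(&mall);
	demo_window_add(&mall, 10);
	demo_window_add(&mall, 20);

	printf("prio 5 conflicts: %s\n",
	       demo_ingress_conflict(&mall, 5) ? "yes" : "no");
	printf("prio 15 conflicts: %s\n",
	       demo_ingress_conflict(&mall, 15) ? "yes" : "no");
	return 0;
}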
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /* Copyright (c) 2020 Marvell International Ltd. All rights reserved */ #include <linux/bitfield.h> #include <linux/bitops.h> #include <linux/errno.h> #include <linux/string.h> #include "prestera_dsa.h" #define PRESTERA_DSA_W0_CMD GENMASK(31, 30) #define PRESTERA_DSA_W0_IS_TAGGED BIT(29) #define PRESTERA_DSA_W0_DEV_NUM GENMASK(28, 24) #define PRESTERA_DSA_W0_PORT_NUM GENMASK(23, 19) #define PRESTERA_DSA_W0_VPT GENMASK(15, 13) #define PRESTERA_DSA_W0_EXT_BIT BIT(12) #define PRESTERA_DSA_W0_VID GENMASK(11, 0) #define PRESTERA_DSA_W1_EXT_BIT BIT(31) #define PRESTERA_DSA_W1_CFI_BIT BIT(30) #define PRESTERA_DSA_W1_PORT_NUM GENMASK(11, 10) #define PRESTERA_DSA_W1_MASK_CPU_CODE GENMASK(7, 0) #define PRESTERA_DSA_W2_EXT_BIT BIT(31) #define PRESTERA_DSA_W2_PORT_NUM BIT(20) #define PRESTERA_DSA_W3_VID GENMASK(30, 27) #define PRESTERA_DSA_W3_DST_EPORT GENMASK(23, 7) #define PRESTERA_DSA_W3_DEV_NUM GENMASK(6, 0) #define PRESTERA_DSA_VID GENMASK(15, 12) #define PRESTERA_DSA_DEV_NUM GENMASK(11, 5) int prestera_dsa_parse(struct prestera_dsa *dsa, const u8 *dsa_buf) { __be32 *dsa_words = (__be32 *)dsa_buf; enum prestera_dsa_cmd cmd; u32 words[4]; u32 field; words[0] = ntohl(dsa_words[0]); words[1] = ntohl(dsa_words[1]); words[2] = ntohl(dsa_words[2]); words[3] = ntohl(dsa_words[3]); /* set the common parameters */ cmd = (enum prestera_dsa_cmd)FIELD_GET(PRESTERA_DSA_W0_CMD, words[0]); /* only to CPU is supported */ if (unlikely(cmd != PRESTERA_DSA_CMD_TO_CPU)) return -EINVAL; if (FIELD_GET(PRESTERA_DSA_W0_EXT_BIT, words[0]) == 0) return -EINVAL; if (FIELD_GET(PRESTERA_DSA_W1_EXT_BIT, words[1]) == 0) return -EINVAL; if (FIELD_GET(PRESTERA_DSA_W2_EXT_BIT, words[2]) == 0) return -EINVAL; field = FIELD_GET(PRESTERA_DSA_W3_VID, words[3]); dsa->vlan.is_tagged = FIELD_GET(PRESTERA_DSA_W0_IS_TAGGED, words[0]); dsa->vlan.cfi_bit = FIELD_GET(PRESTERA_DSA_W1_CFI_BIT, words[1]); dsa->vlan.vpt = FIELD_GET(PRESTERA_DSA_W0_VPT, words[0]); dsa->vlan.vid = FIELD_GET(PRESTERA_DSA_W0_VID, words[0]); dsa->vlan.vid &= ~PRESTERA_DSA_VID; dsa->vlan.vid |= FIELD_PREP(PRESTERA_DSA_VID, field); field = FIELD_GET(PRESTERA_DSA_W3_DEV_NUM, words[3]); dsa->hw_dev_num = FIELD_GET(PRESTERA_DSA_W0_DEV_NUM, words[0]); dsa->hw_dev_num |= FIELD_PREP(PRESTERA_DSA_DEV_NUM, field); dsa->port_num = (FIELD_GET(PRESTERA_DSA_W0_PORT_NUM, words[0]) << 0) | (FIELD_GET(PRESTERA_DSA_W1_PORT_NUM, words[1]) << 5) | (FIELD_GET(PRESTERA_DSA_W2_PORT_NUM, words[2]) << 7); dsa->cpu_code = FIELD_GET(PRESTERA_DSA_W1_MASK_CPU_CODE, words[1]); return 0; } int prestera_dsa_build(const struct prestera_dsa *dsa, u8 *dsa_buf) { __be32 *dsa_words = (__be32 *)dsa_buf; u32 dev_num = dsa->hw_dev_num; u32 words[4] = { 0 }; words[0] |= FIELD_PREP(PRESTERA_DSA_W0_CMD, PRESTERA_DSA_CMD_FROM_CPU); words[0] |= FIELD_PREP(PRESTERA_DSA_W0_DEV_NUM, dev_num); dev_num = FIELD_GET(PRESTERA_DSA_DEV_NUM, dev_num); words[3] |= FIELD_PREP(PRESTERA_DSA_W3_DEV_NUM, dev_num); words[3] |= FIELD_PREP(PRESTERA_DSA_W3_DST_EPORT, dsa->port_num); words[0] |= FIELD_PREP(PRESTERA_DSA_W0_EXT_BIT, 1); words[1] |= FIELD_PREP(PRESTERA_DSA_W1_EXT_BIT, 1); words[2] |= FIELD_PREP(PRESTERA_DSA_W2_EXT_BIT, 1); dsa_words[0] = htonl(words[0]); dsa_words[1] = htonl(words[1]); dsa_words[2] = htonl(words[2]); dsa_words[3] = htonl(words[3]); return 0; }
linux-master
drivers/net/ethernet/marvell/prestera/prestera_dsa.c
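prestera_dsa_parse() in the file above byte-swaps each 32-bit DSA word with ntohl() before masking out the fields. Here is a simplified userspace sketch covering only a few word-0 fields, with mask positions copied from the PRESTERA_DSA_W0_* defines; the sample wire word is hypothetical, and the real parser additionally validates the extension bits in words 1-3 and assembles the port and device numbers from several words.

/* Userspace sketch of unpacking DSA word 0; not driver code. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Field positions copied from the PRESTERA_DSA_W0_* defines above. */
#define DEMO_W0_CMD(w)		(((w) >> 30) & 0x3)	/* GENMASK(31, 30) */
#define DEMO_W0_IS_TAGGED(w)	(((w) >> 29) & 0x1)	/* BIT(29)         */
#define DEMO_W0_PORT_NUM(w)	(((w) >> 19) & 0x1f)	/* GENMASK(23, 19) */
#define DEMO_W0_VID(w)		((w) & 0xfff)		/* GENMASK(11, 0)  */

int main(void)
{
	/* Hypothetical wire bytes for word 0: cmd=3, tagged, port=5, vid=100. */
	uint32_t wire_word0 = htonl((3u << 30) | (1u << 29) | (5u << 19) | 100u);
	uint32_t w0 = ntohl(wire_word0);	/* words arrive big-endian */

	printf("cmd=%u tagged=%u port=%u vid=%u\n",
	       DEMO_W0_CMD(w0), DEMO_W0_IS_TAGGED(w0),
	       DEMO_W0_PORT_NUM(w0), DEMO_W0_VID(w0));
	return 0;
}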
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */ #include <linux/bitfield.h> #include <linux/circ_buf.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include "prestera.h" #define PRESTERA_MSG_MAX_SIZE 1500 #define PRESTERA_SUPP_FW_MAJ_VER 4 #define PRESTERA_SUPP_FW_MIN_VER 1 #define PRESTERA_PREV_FW_MAJ_VER 4 #define PRESTERA_PREV_FW_MIN_VER 0 #define PRESTERA_FW_PATH_FMT "mrvl/prestera/mvsw_prestera_fw-v%u.%u.img" #define PRESTERA_FW_ARM64_PATH_FMT "mrvl/prestera/mvsw_prestera_fw_arm64-v%u.%u.img" #define PRESTERA_FW_HDR_MAGIC 0x351D9D06 #define PRESTERA_FW_DL_TIMEOUT_MS 50000 #define PRESTERA_FW_BLK_SZ 1024 #define PRESTERA_FW_VER_MAJ_MUL 1000000 #define PRESTERA_FW_VER_MIN_MUL 1000 #define PRESTERA_FW_VER_MAJ(v) ((v) / PRESTERA_FW_VER_MAJ_MUL) #define PRESTERA_FW_VER_MIN(v) \ (((v) - (PRESTERA_FW_VER_MAJ(v) * PRESTERA_FW_VER_MAJ_MUL)) / \ PRESTERA_FW_VER_MIN_MUL) #define PRESTERA_FW_VER_PATCH(v) \ ((v) - (PRESTERA_FW_VER_MAJ(v) * PRESTERA_FW_VER_MAJ_MUL) - \ (PRESTERA_FW_VER_MIN(v) * PRESTERA_FW_VER_MIN_MUL)) enum prestera_pci_bar_t { PRESTERA_PCI_BAR_FW = 2, PRESTERA_PCI_BAR_PP = 4, }; struct prestera_fw_header { __be32 magic_number; __be32 version_value; u8 reserved[8]; }; struct prestera_ldr_regs { u32 ldr_ready; u32 pad1; u32 ldr_img_size; u32 ldr_ctl_flags; u32 ldr_buf_offs; u32 ldr_buf_size; u32 ldr_buf_rd; u32 pad2; u32 ldr_buf_wr; u32 ldr_status; }; #define PRESTERA_LDR_REG_OFFSET(f) offsetof(struct prestera_ldr_regs, f) #define PRESTERA_LDR_READY_MAGIC 0xf00dfeed #define PRESTERA_LDR_STATUS_IMG_DL BIT(0) #define PRESTERA_LDR_STATUS_START_FW BIT(1) #define PRESTERA_LDR_STATUS_INVALID_IMG BIT(2) #define PRESTERA_LDR_STATUS_NOMEM BIT(3) #define PRESTERA_LDR_REG_BASE(fw) ((fw)->ldr_regs) #define PRESTERA_LDR_REG_ADDR(fw, reg) (PRESTERA_LDR_REG_BASE(fw) + (reg)) /* fw loader registers */ #define PRESTERA_LDR_READY_REG PRESTERA_LDR_REG_OFFSET(ldr_ready) #define PRESTERA_LDR_IMG_SIZE_REG PRESTERA_LDR_REG_OFFSET(ldr_img_size) #define PRESTERA_LDR_CTL_REG PRESTERA_LDR_REG_OFFSET(ldr_ctl_flags) #define PRESTERA_LDR_BUF_SIZE_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_size) #define PRESTERA_LDR_BUF_OFFS_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_offs) #define PRESTERA_LDR_BUF_RD_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_rd) #define PRESTERA_LDR_BUF_WR_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_wr) #define PRESTERA_LDR_STATUS_REG PRESTERA_LDR_REG_OFFSET(ldr_status) #define PRESTERA_LDR_CTL_DL_START BIT(0) #define PRESTERA_EVT_QNUM_MAX 4 struct prestera_fw_evtq_regs { u32 rd_idx; u32 pad1; u32 wr_idx; u32 pad2; u32 offs; u32 len; }; #define PRESTERA_CMD_QNUM_MAX 4 struct prestera_fw_cmdq_regs { u32 req_ctl; u32 req_len; u32 rcv_ctl; u32 rcv_len; u32 offs; u32 len; }; struct prestera_fw_regs { u32 fw_ready; u32 cmd_offs; u32 cmd_len; u32 cmd_qnum; u32 evt_offs; u32 evt_qnum; u32 fw_status; u32 rx_status; struct prestera_fw_cmdq_regs cmdq_list[PRESTERA_EVT_QNUM_MAX]; struct prestera_fw_evtq_regs evtq_list[PRESTERA_CMD_QNUM_MAX]; }; #define PRESTERA_FW_REG_OFFSET(f) offsetof(struct prestera_fw_regs, f) #define PRESTERA_FW_READY_MAGIC 0xcafebabe /* fw registers */ #define PRESTERA_FW_READY_REG PRESTERA_FW_REG_OFFSET(fw_ready) #define PRESTERA_CMD_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(cmd_offs) #define PRESTERA_CMD_BUF_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_len) #define PRESTERA_CMD_QNUM_REG PRESTERA_FW_REG_OFFSET(cmd_qnum) #define 
PRESTERA_EVT_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(evt_offs) #define PRESTERA_EVT_QNUM_REG PRESTERA_FW_REG_OFFSET(evt_qnum) #define PRESTERA_CMDQ_REG_OFFSET(q, f) \ (PRESTERA_FW_REG_OFFSET(cmdq_list) + \ (q) * sizeof(struct prestera_fw_cmdq_regs) + \ offsetof(struct prestera_fw_cmdq_regs, f)) #define PRESTERA_CMDQ_REQ_CTL_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, req_ctl) #define PRESTERA_CMDQ_REQ_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, req_len) #define PRESTERA_CMDQ_RCV_CTL_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, rcv_ctl) #define PRESTERA_CMDQ_RCV_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, rcv_len) #define PRESTERA_CMDQ_OFFS_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, offs) #define PRESTERA_CMDQ_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, len) #define PRESTERA_FW_STATUS_REG PRESTERA_FW_REG_OFFSET(fw_status) #define PRESTERA_RX_STATUS_REG PRESTERA_FW_REG_OFFSET(rx_status) /* PRESTERA_CMD_REQ_CTL_REG flags */ #define PRESTERA_CMD_F_REQ_SENT BIT(0) #define PRESTERA_CMD_F_REPL_RCVD BIT(1) /* PRESTERA_CMD_RCV_CTL_REG flags */ #define PRESTERA_CMD_F_REPL_SENT BIT(0) #define PRESTERA_FW_EVT_CTL_STATUS_MASK GENMASK(1, 0) #define PRESTERA_FW_EVT_CTL_STATUS_ON 0 #define PRESTERA_FW_EVT_CTL_STATUS_OFF 1 #define PRESTERA_EVTQ_REG_OFFSET(q, f) \ (PRESTERA_FW_REG_OFFSET(evtq_list) + \ (q) * sizeof(struct prestera_fw_evtq_regs) + \ offsetof(struct prestera_fw_evtq_regs, f)) #define PRESTERA_EVTQ_RD_IDX_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, rd_idx) #define PRESTERA_EVTQ_WR_IDX_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, wr_idx) #define PRESTERA_EVTQ_OFFS_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, offs) #define PRESTERA_EVTQ_LEN_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, len) #define PRESTERA_FW_REG_BASE(fw) ((fw)->dev.ctl_regs) #define PRESTERA_FW_REG_ADDR(fw, reg) PRESTERA_FW_REG_BASE((fw)) + (reg) #define PRESTERA_FW_CMD_DEFAULT_WAIT_MS 30000 #define PRESTERA_FW_READY_WAIT_MS 20000 #define PRESTERA_DEV_ID_AC3X_98DX_55 0xC804 #define PRESTERA_DEV_ID_AC3X_98DX_65 0xC80C #define PRESTERA_DEV_ID_ALDRIN2 0xCC1E #define PRESTERA_DEV_ID_98DX7312M 0x981F #define PRESTERA_DEV_ID_98DX3500 0x9820 #define PRESTERA_DEV_ID_98DX3501 0x9826 #define PRESTERA_DEV_ID_98DX3510 0x9821 #define PRESTERA_DEV_ID_98DX3520 0x9822 struct prestera_fw_evtq { u8 __iomem *addr; size_t len; }; struct prestera_fw_cmdq { /* serialize access to dev->send_req */ struct mutex cmd_mtx; u8 __iomem *addr; size_t len; }; struct prestera_fw { struct prestera_fw_rev rev_supp; const struct firmware *bin; struct workqueue_struct *wq; struct prestera_device dev; struct pci_dev *pci_dev; u8 __iomem *ldr_regs; u8 __iomem *ldr_ring_buf; u32 ldr_buf_len; u32 ldr_wr_idx; size_t cmd_mbox_len; u8 __iomem *cmd_mbox; struct prestera_fw_cmdq cmd_queue[PRESTERA_CMD_QNUM_MAX]; u8 cmd_qnum; struct prestera_fw_evtq evt_queue[PRESTERA_EVT_QNUM_MAX]; u8 evt_qnum; struct work_struct evt_work; u8 __iomem *evt_buf; u8 *evt_msg; }; static int prestera_fw_load(struct prestera_fw *fw); static void prestera_fw_write(struct prestera_fw *fw, u32 reg, u32 val) { writel(val, PRESTERA_FW_REG_ADDR(fw, reg)); } static u32 prestera_fw_read(struct prestera_fw *fw, u32 reg) { return readl(PRESTERA_FW_REG_ADDR(fw, reg)); } static u32 prestera_fw_evtq_len(struct prestera_fw *fw, u8 qid) { return fw->evt_queue[qid].len; } static u32 prestera_fw_evtq_avail(struct prestera_fw *fw, u8 qid) { u32 wr_idx = prestera_fw_read(fw, PRESTERA_EVTQ_WR_IDX_REG(qid)); u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid)); return CIRC_CNT(wr_idx, rd_idx, prestera_fw_evtq_len(fw, qid)); } static void prestera_fw_evtq_rd_set(struct prestera_fw *fw, 
u8 qid, u32 idx) { u32 rd_idx = idx & (prestera_fw_evtq_len(fw, qid) - 1); prestera_fw_write(fw, PRESTERA_EVTQ_RD_IDX_REG(qid), rd_idx); } static u8 __iomem *prestera_fw_evtq_buf(struct prestera_fw *fw, u8 qid) { return fw->evt_queue[qid].addr; } static u32 prestera_fw_evtq_read32(struct prestera_fw *fw, u8 qid) { u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid)); u32 val; val = readl(prestera_fw_evtq_buf(fw, qid) + rd_idx); prestera_fw_evtq_rd_set(fw, qid, rd_idx + 4); return val; } static ssize_t prestera_fw_evtq_read_buf(struct prestera_fw *fw, u8 qid, void *buf, size_t len) { u32 idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid)); u8 __iomem *evtq_addr = prestera_fw_evtq_buf(fw, qid); u32 *buf32 = buf; int i; for (i = 0; i < len / 4; buf32++, i++) { *buf32 = readl_relaxed(evtq_addr + idx); idx = (idx + 4) & (prestera_fw_evtq_len(fw, qid) - 1); } prestera_fw_evtq_rd_set(fw, qid, idx); return i; } static u8 prestera_fw_evtq_pick(struct prestera_fw *fw) { int qid; for (qid = 0; qid < fw->evt_qnum; qid++) { if (prestera_fw_evtq_avail(fw, qid) >= 4) return qid; } return PRESTERA_EVT_QNUM_MAX; } static void prestera_fw_evt_ctl_status_set(struct prestera_fw *fw, u32 val) { u32 status = prestera_fw_read(fw, PRESTERA_FW_STATUS_REG); u32p_replace_bits(&status, val, PRESTERA_FW_EVT_CTL_STATUS_MASK); prestera_fw_write(fw, PRESTERA_FW_STATUS_REG, status); } static void prestera_fw_evt_work_fn(struct work_struct *work) { struct prestera_fw *fw; void *msg; u8 qid; fw = container_of(work, struct prestera_fw, evt_work); msg = fw->evt_msg; prestera_fw_evt_ctl_status_set(fw, PRESTERA_FW_EVT_CTL_STATUS_OFF); while ((qid = prestera_fw_evtq_pick(fw)) < PRESTERA_EVT_QNUM_MAX) { u32 idx; u32 len; len = prestera_fw_evtq_read32(fw, qid); idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid)); WARN_ON(prestera_fw_evtq_avail(fw, qid) < len); if (WARN_ON(len > PRESTERA_MSG_MAX_SIZE)) { prestera_fw_evtq_rd_set(fw, qid, idx + len); continue; } prestera_fw_evtq_read_buf(fw, qid, msg, len); if (fw->dev.recv_msg) fw->dev.recv_msg(&fw->dev, msg, len); } prestera_fw_evt_ctl_status_set(fw, PRESTERA_FW_EVT_CTL_STATUS_ON); } static int prestera_fw_wait_reg32(struct prestera_fw *fw, u32 reg, u32 cmp, unsigned int waitms) { u8 __iomem *addr = PRESTERA_FW_REG_ADDR(fw, reg); u32 val; return readl_poll_timeout(addr, val, cmp == val, 1 * USEC_PER_MSEC, waitms * USEC_PER_MSEC); } static void prestera_fw_cmdq_lock(struct prestera_fw *fw, u8 qid) { mutex_lock(&fw->cmd_queue[qid].cmd_mtx); } static void prestera_fw_cmdq_unlock(struct prestera_fw *fw, u8 qid) { mutex_unlock(&fw->cmd_queue[qid].cmd_mtx); } static u32 prestera_fw_cmdq_len(struct prestera_fw *fw, u8 qid) { return fw->cmd_queue[qid].len; } static u8 __iomem *prestera_fw_cmdq_buf(struct prestera_fw *fw, u8 qid) { return fw->cmd_queue[qid].addr; } static int prestera_fw_cmd_send(struct prestera_fw *fw, int qid, void *in_msg, size_t in_size, void *out_msg, size_t out_size, unsigned int waitms) { u32 ret_size; int err; if (!waitms) waitms = PRESTERA_FW_CMD_DEFAULT_WAIT_MS; if (ALIGN(in_size, 4) > prestera_fw_cmdq_len(fw, qid)) return -EMSGSIZE; /* wait for finish previous reply from FW */ err = prestera_fw_wait_reg32(fw, PRESTERA_CMDQ_RCV_CTL_REG(qid), 0, 30); if (err) { dev_err(fw->dev.dev, "finish reply from FW is timed out\n"); return err; } prestera_fw_write(fw, PRESTERA_CMDQ_REQ_LEN_REG(qid), in_size); memcpy_toio(prestera_fw_cmdq_buf(fw, qid), in_msg, in_size); prestera_fw_write(fw, PRESTERA_CMDQ_REQ_CTL_REG(qid), PRESTERA_CMD_F_REQ_SENT); /* 
wait for reply from FW */ err = prestera_fw_wait_reg32(fw, PRESTERA_CMDQ_RCV_CTL_REG(qid), PRESTERA_CMD_F_REPL_SENT, waitms); if (err) { dev_err(fw->dev.dev, "reply from FW is timed out\n"); goto cmd_exit; } ret_size = prestera_fw_read(fw, PRESTERA_CMDQ_RCV_LEN_REG(qid)); if (ret_size > out_size) { dev_err(fw->dev.dev, "ret_size (%u) > out_len(%zu)\n", ret_size, out_size); err = -EMSGSIZE; goto cmd_exit; } memcpy_fromio(out_msg, prestera_fw_cmdq_buf(fw, qid) + in_size, ret_size); cmd_exit: prestera_fw_write(fw, PRESTERA_CMDQ_REQ_CTL_REG(qid), PRESTERA_CMD_F_REPL_RCVD); return err; } static int prestera_fw_send_req(struct prestera_device *dev, int qid, void *in_msg, size_t in_size, void *out_msg, size_t out_size, unsigned int waitms) { struct prestera_fw *fw; ssize_t ret; fw = container_of(dev, struct prestera_fw, dev); prestera_fw_cmdq_lock(fw, qid); ret = prestera_fw_cmd_send(fw, qid, in_msg, in_size, out_msg, out_size, waitms); prestera_fw_cmdq_unlock(fw, qid); return ret; } static int prestera_fw_init(struct prestera_fw *fw) { u8 __iomem *base; int err; u8 qid; fw->dev.send_req = prestera_fw_send_req; fw->ldr_regs = fw->dev.ctl_regs; err = prestera_fw_load(fw); if (err) return err; err = prestera_fw_wait_reg32(fw, PRESTERA_FW_READY_REG, PRESTERA_FW_READY_MAGIC, PRESTERA_FW_READY_WAIT_MS); if (err) { dev_err(fw->dev.dev, "FW failed to start\n"); return err; } base = fw->dev.ctl_regs; fw->cmd_mbox = base + prestera_fw_read(fw, PRESTERA_CMD_BUF_OFFS_REG); fw->cmd_mbox_len = prestera_fw_read(fw, PRESTERA_CMD_BUF_LEN_REG); fw->cmd_qnum = prestera_fw_read(fw, PRESTERA_CMD_QNUM_REG); for (qid = 0; qid < fw->cmd_qnum; qid++) { u32 offs = prestera_fw_read(fw, PRESTERA_CMDQ_OFFS_REG(qid)); struct prestera_fw_cmdq *cmdq = &fw->cmd_queue[qid]; cmdq->len = prestera_fw_read(fw, PRESTERA_CMDQ_LEN_REG(qid)); cmdq->addr = fw->cmd_mbox + offs; mutex_init(&cmdq->cmd_mtx); } fw->evt_buf = base + prestera_fw_read(fw, PRESTERA_EVT_BUF_OFFS_REG); fw->evt_qnum = prestera_fw_read(fw, PRESTERA_EVT_QNUM_REG); fw->evt_msg = kmalloc(PRESTERA_MSG_MAX_SIZE, GFP_KERNEL); if (!fw->evt_msg) return -ENOMEM; for (qid = 0; qid < fw->evt_qnum; qid++) { u32 offs = prestera_fw_read(fw, PRESTERA_EVTQ_OFFS_REG(qid)); struct prestera_fw_evtq *evtq = &fw->evt_queue[qid]; evtq->len = prestera_fw_read(fw, PRESTERA_EVTQ_LEN_REG(qid)); evtq->addr = fw->evt_buf + offs; } return 0; } static void prestera_fw_uninit(struct prestera_fw *fw) { kfree(fw->evt_msg); } static irqreturn_t prestera_pci_irq_handler(int irq, void *dev_id) { struct prestera_fw *fw = dev_id; if (prestera_fw_read(fw, PRESTERA_RX_STATUS_REG)) { prestera_fw_write(fw, PRESTERA_RX_STATUS_REG, 0); if (fw->dev.recv_pkt) fw->dev.recv_pkt(&fw->dev); } queue_work(fw->wq, &fw->evt_work); return IRQ_HANDLED; } static void prestera_ldr_write(struct prestera_fw *fw, u32 reg, u32 val) { writel(val, PRESTERA_LDR_REG_ADDR(fw, reg)); } static u32 prestera_ldr_read(struct prestera_fw *fw, u32 reg) { return readl(PRESTERA_LDR_REG_ADDR(fw, reg)); } static int prestera_ldr_wait_reg32(struct prestera_fw *fw, u32 reg, u32 cmp, unsigned int waitms) { u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, reg); u32 val; return readl_poll_timeout(addr, val, cmp == val, 10 * USEC_PER_MSEC, waitms * USEC_PER_MSEC); } static u32 prestera_ldr_wait_buf(struct prestera_fw *fw, size_t len) { u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, PRESTERA_LDR_BUF_RD_REG); u32 buf_len = fw->ldr_buf_len; u32 wr_idx = fw->ldr_wr_idx; u32 rd_idx; return readl_poll_timeout(addr, rd_idx, CIRC_SPACE(wr_idx, rd_idx, buf_len) >= 
len, 1 * USEC_PER_MSEC, 100 * USEC_PER_MSEC); } static int prestera_ldr_wait_dl_finish(struct prestera_fw *fw) { u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, PRESTERA_LDR_STATUS_REG); unsigned long mask = ~(PRESTERA_LDR_STATUS_IMG_DL); u32 val; int err; err = readl_poll_timeout(addr, val, val & mask, 10 * USEC_PER_MSEC, PRESTERA_FW_DL_TIMEOUT_MS * USEC_PER_MSEC); if (err) { dev_err(fw->dev.dev, "Timeout to load FW img [state=%d]", prestera_ldr_read(fw, PRESTERA_LDR_STATUS_REG)); return err; } return 0; } static void prestera_ldr_wr_idx_move(struct prestera_fw *fw, unsigned int n) { fw->ldr_wr_idx = (fw->ldr_wr_idx + (n)) & (fw->ldr_buf_len - 1); } static void prestera_ldr_wr_idx_commit(struct prestera_fw *fw) { prestera_ldr_write(fw, PRESTERA_LDR_BUF_WR_REG, fw->ldr_wr_idx); } static u8 __iomem *prestera_ldr_wr_ptr(struct prestera_fw *fw) { return fw->ldr_ring_buf + fw->ldr_wr_idx; } static int prestera_ldr_send(struct prestera_fw *fw, const u8 *buf, size_t len) { int err; int i; err = prestera_ldr_wait_buf(fw, len); if (err) { dev_err(fw->dev.dev, "failed wait for sending firmware\n"); return err; } for (i = 0; i < len; i += 4) { writel_relaxed(*(u32 *)(buf + i), prestera_ldr_wr_ptr(fw)); prestera_ldr_wr_idx_move(fw, 4); } prestera_ldr_wr_idx_commit(fw); return 0; } static int prestera_ldr_fw_send(struct prestera_fw *fw, const char *img, u32 fw_size) { u32 status; u32 pos; int err; err = prestera_ldr_wait_reg32(fw, PRESTERA_LDR_STATUS_REG, PRESTERA_LDR_STATUS_IMG_DL, 5 * MSEC_PER_SEC); if (err) { dev_err(fw->dev.dev, "Loader is not ready to load image\n"); return err; } for (pos = 0; pos < fw_size; pos += PRESTERA_FW_BLK_SZ) { if (pos + PRESTERA_FW_BLK_SZ > fw_size) break; err = prestera_ldr_send(fw, img + pos, PRESTERA_FW_BLK_SZ); if (err) return err; } if (pos < fw_size) { err = prestera_ldr_send(fw, img + pos, fw_size - pos); if (err) return err; } err = prestera_ldr_wait_dl_finish(fw); if (err) return err; status = prestera_ldr_read(fw, PRESTERA_LDR_STATUS_REG); switch (status) { case PRESTERA_LDR_STATUS_INVALID_IMG: dev_err(fw->dev.dev, "FW img has bad CRC\n"); return -EINVAL; case PRESTERA_LDR_STATUS_NOMEM: dev_err(fw->dev.dev, "Loader has no enough mem\n"); return -ENOMEM; } return 0; } static void prestera_fw_rev_parse(const struct prestera_fw_header *hdr, struct prestera_fw_rev *rev) { u32 version = be32_to_cpu(hdr->version_value); rev->maj = PRESTERA_FW_VER_MAJ(version); rev->min = PRESTERA_FW_VER_MIN(version); rev->sub = PRESTERA_FW_VER_PATCH(version); } static int prestera_fw_rev_check(struct prestera_fw *fw) { struct prestera_fw_rev *rev = &fw->dev.fw_rev; if (rev->maj == fw->rev_supp.maj && rev->min >= fw->rev_supp.min) return 0; dev_err(fw->dev.dev, "Driver supports FW version only '%u.%u.x'", fw->rev_supp.maj, fw->rev_supp.min); return -EINVAL; } static int prestera_fw_hdr_parse(struct prestera_fw *fw) { struct prestera_fw_rev *rev = &fw->dev.fw_rev; struct prestera_fw_header *hdr; u32 magic; hdr = (struct prestera_fw_header *)fw->bin->data; magic = be32_to_cpu(hdr->magic_number); if (magic != PRESTERA_FW_HDR_MAGIC) { dev_err(fw->dev.dev, "FW img hdr magic is invalid"); return -EINVAL; } prestera_fw_rev_parse(hdr, rev); dev_info(fw->dev.dev, "FW version '%u.%u.%u'\n", rev->maj, rev->min, rev->sub); return prestera_fw_rev_check(fw); } static const char *prestera_fw_path_fmt_get(struct prestera_fw *fw) { switch (fw->pci_dev->device) { case PRESTERA_DEV_ID_98DX3500: case PRESTERA_DEV_ID_98DX3501: case PRESTERA_DEV_ID_98DX3510: case PRESTERA_DEV_ID_98DX3520: return 
PRESTERA_FW_ARM64_PATH_FMT; default: return PRESTERA_FW_PATH_FMT; } } static int prestera_fw_get(struct prestera_fw *fw) { int ver_maj = PRESTERA_SUPP_FW_MAJ_VER; int ver_min = PRESTERA_SUPP_FW_MIN_VER; char fw_path[128]; int err; pick_fw_ver: snprintf(fw_path, sizeof(fw_path), prestera_fw_path_fmt_get(fw), ver_maj, ver_min); err = request_firmware_direct(&fw->bin, fw_path, fw->dev.dev); if (err) { if (ver_maj != PRESTERA_PREV_FW_MAJ_VER || ver_min != PRESTERA_PREV_FW_MIN_VER) { ver_maj = PRESTERA_PREV_FW_MAJ_VER; ver_min = PRESTERA_PREV_FW_MIN_VER; dev_warn(fw->dev.dev, "missing latest %s firmware, fall-back to previous %u.%u version\n", fw_path, ver_maj, ver_min); goto pick_fw_ver; } else { dev_err(fw->dev.dev, "failed to request previous firmware: %s\n", fw_path); return err; } } dev_info(fw->dev.dev, "Loading %s ...", fw_path); fw->rev_supp.maj = ver_maj; fw->rev_supp.min = ver_min; fw->rev_supp.sub = 0; return 0; } static void prestera_fw_put(struct prestera_fw *fw) { release_firmware(fw->bin); } static int prestera_fw_load(struct prestera_fw *fw) { size_t hlen = sizeof(struct prestera_fw_header); int err; err = prestera_ldr_wait_reg32(fw, PRESTERA_LDR_READY_REG, PRESTERA_LDR_READY_MAGIC, 5 * MSEC_PER_SEC); if (err) { dev_err(fw->dev.dev, "waiting for FW loader is timed out"); return err; } fw->ldr_ring_buf = fw->ldr_regs + prestera_ldr_read(fw, PRESTERA_LDR_BUF_OFFS_REG); fw->ldr_buf_len = prestera_ldr_read(fw, PRESTERA_LDR_BUF_SIZE_REG); fw->ldr_wr_idx = 0; err = prestera_fw_get(fw); if (err) return err; err = prestera_fw_hdr_parse(fw); if (err) { dev_err(fw->dev.dev, "FW image header is invalid\n"); goto out_release; } prestera_ldr_write(fw, PRESTERA_LDR_IMG_SIZE_REG, fw->bin->size - hlen); prestera_ldr_write(fw, PRESTERA_LDR_CTL_REG, PRESTERA_LDR_CTL_DL_START); err = prestera_ldr_fw_send(fw, fw->bin->data + hlen, fw->bin->size - hlen); out_release: prestera_fw_put(fw); return err; } static bool prestera_pci_pp_use_bar2(struct pci_dev *pdev) { switch (pdev->device) { case PRESTERA_DEV_ID_98DX7312M: case PRESTERA_DEV_ID_98DX3500: case PRESTERA_DEV_ID_98DX3501: case PRESTERA_DEV_ID_98DX3510: case PRESTERA_DEV_ID_98DX3520: return true; default: return false; } } static u32 prestera_pci_pp_bar2_offs(struct pci_dev *pdev) { if (pci_resource_len(pdev, 2) == 0x1000000) return 0x0; else return (pci_resource_len(pdev, 2) / 2); } static u32 prestera_pci_fw_bar2_offs(struct pci_dev *pdev) { if (pci_resource_len(pdev, 2) == 0x1000000) return 0x400000; else return 0x0; } static int prestera_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { const char *driver_name = dev_driver_string(&pdev->dev); u8 __iomem *mem_addr, *pp_addr = NULL; struct prestera_fw *fw; int err; err = pcim_enable_device(pdev); if (err) { dev_err(&pdev->dev, "pci_enable_device failed\n"); goto err_pci_enable_device; } err = pci_request_regions(pdev, driver_name); if (err) { dev_err(&pdev->dev, "pci_request_regions failed\n"); goto err_pci_request_regions; } err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(30)); if (err) { dev_err(&pdev->dev, "fail to set DMA mask\n"); goto err_dma_mask; } mem_addr = pcim_iomap(pdev, 2, 0); if (!mem_addr) { dev_err(&pdev->dev, "pci mem ioremap failed\n"); err = -EIO; goto err_mem_ioremap; } /* AC5X devices use second half of BAR2 */ if (prestera_pci_pp_use_bar2(pdev)) { pp_addr = mem_addr + prestera_pci_pp_bar2_offs(pdev); mem_addr = mem_addr + prestera_pci_fw_bar2_offs(pdev); } else { pp_addr = pcim_iomap(pdev, 4, 0); if (!pp_addr) { dev_err(&pdev->dev, "pp regs 
ioremap failed\n"); err = -EIO; goto err_pp_ioremap; } } pci_set_master(pdev); fw = devm_kzalloc(&pdev->dev, sizeof(*fw), GFP_KERNEL); if (!fw) { err = -ENOMEM; goto err_pci_dev_alloc; } fw->pci_dev = pdev; fw->dev.ctl_regs = mem_addr; fw->dev.pp_regs = pp_addr; fw->dev.dev = &pdev->dev; pci_set_drvdata(pdev, fw); err = prestera_fw_init(fw); if (err) goto err_prestera_fw_init; dev_info(fw->dev.dev, "Prestera FW is ready\n"); fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI, 1); if (!fw->wq) { err = -ENOMEM; goto err_wq_alloc; } INIT_WORK(&fw->evt_work, prestera_fw_evt_work_fn); err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); if (err < 0) { dev_err(&pdev->dev, "MSI IRQ init failed\n"); goto err_irq_alloc; } err = request_irq(pci_irq_vector(pdev, 0), prestera_pci_irq_handler, 0, driver_name, fw); if (err) { dev_err(&pdev->dev, "fail to request IRQ\n"); goto err_request_irq; } err = prestera_device_register(&fw->dev); if (err) goto err_prestera_dev_register; return 0; err_prestera_dev_register: free_irq(pci_irq_vector(pdev, 0), fw); err_request_irq: pci_free_irq_vectors(pdev); err_irq_alloc: destroy_workqueue(fw->wq); err_wq_alloc: prestera_fw_uninit(fw); err_prestera_fw_init: err_pci_dev_alloc: err_pp_ioremap: err_mem_ioremap: err_dma_mask: pci_release_regions(pdev); err_pci_request_regions: err_pci_enable_device: return err; } static void prestera_pci_remove(struct pci_dev *pdev) { struct prestera_fw *fw = pci_get_drvdata(pdev); prestera_device_unregister(&fw->dev); free_irq(pci_irq_vector(pdev, 0), fw); pci_free_irq_vectors(pdev); destroy_workqueue(fw->wq); prestera_fw_uninit(fw); pci_release_regions(pdev); } static const struct pci_device_id prestera_pci_devices[] = { { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_AC3X_98DX_55) }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_AC3X_98DX_65) }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_ALDRIN2) }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX7312M) }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX3500) }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX3501) }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX3510) }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX3520) }, { } }; MODULE_DEVICE_TABLE(pci, prestera_pci_devices); static struct pci_driver prestera_pci_driver = { .name = "Prestera DX", .id_table = prestera_pci_devices, .probe = prestera_pci_probe, .remove = prestera_pci_remove, }; module_pci_driver(prestera_pci_driver); MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("Marvell Prestera switch PCI interface");
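/*
 * A minimal, self-contained sketch of the ring-buffer index arithmetic the
 * loader and event-queue code above relies on.  The CIRC_CNT()/CIRC_SPACE()
 * definitions follow the semantics of <linux/circ_buf.h>; the ring size and
 * indices below are made-up illustration values, not driver state.  Ring
 * lengths are assumed to be powers of two, which is why masking with
 * (size - 1) wraps an index, exactly as prestera_ldr_wr_idx_move() and
 * prestera_fw_evtq_rd_set() do with (idx & (len - 1)).
 */
#include <stdio.h>

#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
	unsigned int size = 16;       /* hypothetical ring length (power of two) */
	unsigned int wr = 14, rd = 6; /* producer (write) and consumer (read) indices */

	/* Bytes available to the consumer, and room left for the producer. */
	printf("count = %u\n", CIRC_CNT(wr, rd, size));   /* 8 */
	printf("space = %u\n", CIRC_SPACE(wr, rd, size)); /* 7: one slot stays free */

	/* Advancing an index wraps by masking, never by modulo. */
	wr = (wr + 4) & (size - 1);
	printf("wr after +4 = %u\n", wr);                 /* 2 */
	return 0;
}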
linux-master
drivers/net/ethernet/marvell/prestera/prestera_pci.c
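/*
 * The PRESTERA_FW_VER_MAJ/MIN/PATCH macros in prestera_pci.c above decode a
 * firmware version packed as maj * 1000000 + min * 1000 + patch; this is the
 * decomposition prestera_fw_rev_parse() performs before the driver reports
 * "FW version 'x.y.z'".  A small standalone sketch of that arithmetic; the
 * packed value used here is invented for illustration and is not taken from
 * a real image header.
 */
#include <stdio.h>

#define VER_MAJ_MUL 1000000u
#define VER_MIN_MUL 1000u

int main(void)
{
	unsigned int v = 4001002;                          /* hypothetical 4.1.2 */
	unsigned int maj = v / VER_MAJ_MUL;
	unsigned int min = (v - maj * VER_MAJ_MUL) / VER_MIN_MUL;
	unsigned int patch = v - maj * VER_MAJ_MUL - min * VER_MIN_MUL;

	printf("%u.%u.%u\n", maj, min, patch);             /* prints 4.1.2 */
	return 0;
}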
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/inetdevice.h> #include <net/inet_dscp.h> #include <net/switchdev.h> #include <linux/rhashtable.h> #include <net/nexthop.h> #include <net/arp.h> #include <linux/if_vlan.h> #include <linux/if_macvlan.h> #include <net/netevent.h> #include "prestera.h" #include "prestera_router_hw.h" #define PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH #define PRESTERA_NH_PROBE_INTERVAL 5000 /* ms */ struct prestera_kern_neigh_cache_key { struct prestera_ip_addr addr; struct net_device *dev; }; struct prestera_kern_neigh_cache { struct prestera_kern_neigh_cache_key key; struct rhash_head ht_node; struct list_head kern_fib_cache_list; /* Hold prepared nh_neigh info if is in_kernel */ struct prestera_neigh_info nh_neigh_info; /* Indicate if neighbour is reachable by direct route */ bool reachable; /* Lock cache if neigh is present in kernel */ bool in_kernel; }; struct prestera_kern_fib_cache_key { struct prestera_ip_addr addr; u32 prefix_len; u32 kern_tb_id; /* tb_id from kernel (not fixed) */ }; /* Subscribing on neighbours in kernel */ struct prestera_kern_fib_cache { struct prestera_kern_fib_cache_key key; struct { struct prestera_fib_key fib_key; enum prestera_fib_type fib_type; struct prestera_nexthop_group_key nh_grp_key; } lpm_info; /* hold prepared lpm info */ /* Indicate if route is not overlapped by another table */ struct rhash_head ht_node; /* node of prestera_router */ struct prestera_kern_neigh_cache_head { struct prestera_kern_fib_cache *this; struct list_head head; struct prestera_kern_neigh_cache *n_cache; } kern_neigh_cache_head[PRESTERA_NHGR_SIZE_MAX]; union { struct fib_notifier_info info; /* point to any of 4/6 */ struct fib_entry_notifier_info fen4_info; }; bool reachable; }; static const struct rhashtable_params __prestera_kern_neigh_cache_ht_params = { .key_offset = offsetof(struct prestera_kern_neigh_cache, key), .head_offset = offsetof(struct prestera_kern_neigh_cache, ht_node), .key_len = sizeof(struct prestera_kern_neigh_cache_key), .automatic_shrinking = true, }; static const struct rhashtable_params __prestera_kern_fib_cache_ht_params = { .key_offset = offsetof(struct prestera_kern_fib_cache, key), .head_offset = offsetof(struct prestera_kern_fib_cache, ht_node), .key_len = sizeof(struct prestera_kern_fib_cache_key), .automatic_shrinking = true, }; /* This util to be used, to convert kernel rules for default vr in hw_vr */ static u32 prestera_fix_tb_id(u32 tb_id) { if (tb_id == RT_TABLE_UNSPEC || tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT) tb_id = RT_TABLE_MAIN; return tb_id; } static void prestera_util_fen_info2fib_cache_key(struct fib_notifier_info *info, struct prestera_kern_fib_cache_key *key) { struct fib_entry_notifier_info *fen_info = container_of(info, struct fib_entry_notifier_info, info); memset(key, 0, sizeof(*key)); key->addr.v = PRESTERA_IPV4; key->addr.u.ipv4 = cpu_to_be32(fen_info->dst); key->prefix_len = fen_info->dst_len; key->kern_tb_id = fen_info->tb_id; } static int prestera_util_nhc2nc_key(struct prestera_switch *sw, struct fib_nh_common *nhc, struct prestera_kern_neigh_cache_key *nk) { memset(nk, 0, sizeof(*nk)); if (nhc->nhc_gw_family == AF_INET) { nk->addr.v = PRESTERA_IPV4; nk->addr.u.ipv4 = nhc->nhc_gw.ipv4; } else { nk->addr.v = PRESTERA_IPV6; nk->addr.u.ipv6 = nhc->nhc_gw.ipv6; } nk->dev = nhc->nhc_dev; return 0; } static void 
prestera_util_nc_key2nh_key(struct prestera_kern_neigh_cache_key *ck, struct prestera_nh_neigh_key *nk) { memset(nk, 0, sizeof(*nk)); nk->addr = ck->addr; nk->rif = (void *)ck->dev; } static bool prestera_util_nhc_eq_n_cache_key(struct prestera_switch *sw, struct fib_nh_common *nhc, struct prestera_kern_neigh_cache_key *nk) { struct prestera_kern_neigh_cache_key tk; int err; err = prestera_util_nhc2nc_key(sw, nhc, &tk); if (err) return false; if (memcmp(&tk, nk, sizeof(tk))) return false; return true; } static int prestera_util_neigh2nc_key(struct prestera_switch *sw, struct neighbour *n, struct prestera_kern_neigh_cache_key *key) { memset(key, 0, sizeof(*key)); if (n->tbl->family == AF_INET) { key->addr.v = PRESTERA_IPV4; key->addr.u.ipv4 = *(__be32 *)n->primary_key; } else { return -ENOENT; } key->dev = n->dev; return 0; } static bool __prestera_fi_is_direct(struct fib_info *fi) { struct fib_nh_common *fib_nhc; if (fib_info_num_path(fi) == 1) { fib_nhc = fib_info_nhc(fi, 0); if (fib_nhc->nhc_gw_family == AF_UNSPEC) return true; } return false; } static bool prestera_fi_is_direct(struct fib_info *fi) { if (fi->fib_type != RTN_UNICAST) return false; return __prestera_fi_is_direct(fi); } static bool prestera_fi_is_nh(struct fib_info *fi) { if (fi->fib_type != RTN_UNICAST) return false; return !__prestera_fi_is_direct(fi); } static bool __prestera_fi6_is_direct(struct fib6_info *fi) { if (!fi->fib6_nh->nh_common.nhc_gw_family) return true; return false; } static bool prestera_fi6_is_direct(struct fib6_info *fi) { if (fi->fib6_type != RTN_UNICAST) return false; return __prestera_fi6_is_direct(fi); } static bool prestera_fi6_is_nh(struct fib6_info *fi) { if (fi->fib6_type != RTN_UNICAST) return false; return !__prestera_fi6_is_direct(fi); } static bool prestera_fib_info_is_direct(struct fib_notifier_info *info) { struct fib6_entry_notifier_info *fen6_info = container_of(info, struct fib6_entry_notifier_info, info); struct fib_entry_notifier_info *fen_info = container_of(info, struct fib_entry_notifier_info, info); if (info->family == AF_INET) return prestera_fi_is_direct(fen_info->fi); else return prestera_fi6_is_direct(fen6_info->rt); } static bool prestera_fib_info_is_nh(struct fib_notifier_info *info) { struct fib6_entry_notifier_info *fen6_info = container_of(info, struct fib6_entry_notifier_info, info); struct fib_entry_notifier_info *fen_info = container_of(info, struct fib_entry_notifier_info, info); if (info->family == AF_INET) return prestera_fi_is_nh(fen_info->fi); else return prestera_fi6_is_nh(fen6_info->rt); } /* must be called with rcu_read_lock() */ static int prestera_util_kern_get_route(struct fib_result *res, u32 tb_id, __be32 *addr) { struct flowi4 fl4; /* TODO: walkthrough appropriate tables in kernel * to know if the same prefix exists in several tables */ memset(&fl4, 0, sizeof(fl4)); fl4.daddr = *addr; return fib_lookup(&init_net, &fl4, res, 0 /* FIB_LOOKUP_NOREF */); } static bool __prestera_util_kern_n_is_reachable_v4(u32 tb_id, __be32 *addr, struct net_device *dev) { struct fib_nh_common *fib_nhc; struct fib_result res; bool reachable; reachable = false; if (!prestera_util_kern_get_route(&res, tb_id, addr)) if (prestera_fi_is_direct(res.fi)) { fib_nhc = fib_info_nhc(res.fi, 0); if (dev == fib_nhc->nhc_dev) reachable = true; } return reachable; } /* Check if neigh route is reachable */ static bool prestera_util_kern_n_is_reachable(u32 tb_id, struct prestera_ip_addr *addr, struct net_device *dev) { if (addr->v == PRESTERA_IPV4) return 
__prestera_util_kern_n_is_reachable_v4(tb_id, &addr->u.ipv4, dev); else return false; } static void prestera_util_kern_set_neigh_offload(struct neighbour *n, bool offloaded) { if (offloaded) n->flags |= NTF_OFFLOADED; else n->flags &= ~NTF_OFFLOADED; } static void prestera_util_kern_set_nh_offload(struct fib_nh_common *nhc, bool offloaded, bool trap) { if (offloaded) nhc->nhc_flags |= RTNH_F_OFFLOAD; else nhc->nhc_flags &= ~RTNH_F_OFFLOAD; if (trap) nhc->nhc_flags |= RTNH_F_TRAP; else nhc->nhc_flags &= ~RTNH_F_TRAP; } static struct fib_nh_common * prestera_kern_fib_info_nhc(struct fib_notifier_info *info, int n) { struct fib6_entry_notifier_info *fen6_info; struct fib_entry_notifier_info *fen4_info; struct fib6_info *iter; if (info->family == AF_INET) { fen4_info = container_of(info, struct fib_entry_notifier_info, info); return fib_info_nhc(fen4_info->fi, n); } else if (info->family == AF_INET6) { fen6_info = container_of(info, struct fib6_entry_notifier_info, info); if (!n) return &fen6_info->rt->fib6_nh->nh_common; list_for_each_entry(iter, &fen6_info->rt->fib6_siblings, fib6_siblings) { if (!--n) return &iter->fib6_nh->nh_common; } } /* if family is incorrect - than upper functions has BUG */ /* if doesn't find requested index - there is alsi bug, because * valid index must be produced by nhs, which checks list length */ WARN(1, "Invalid parameters passed to %s n=%d i=%p", __func__, n, info); return NULL; } static int prestera_kern_fib_info_nhs(struct fib_notifier_info *info) { struct fib6_entry_notifier_info *fen6_info; struct fib_entry_notifier_info *fen4_info; if (info->family == AF_INET) { fen4_info = container_of(info, struct fib_entry_notifier_info, info); return fib_info_num_path(fen4_info->fi); } else if (info->family == AF_INET6) { fen6_info = container_of(info, struct fib6_entry_notifier_info, info); return fen6_info->rt->fib6_nsiblings + 1; } return 0; } static unsigned char prestera_kern_fib_info_type(struct fib_notifier_info *info) { struct fib6_entry_notifier_info *fen6_info; struct fib_entry_notifier_info *fen4_info; if (info->family == AF_INET) { fen4_info = container_of(info, struct fib_entry_notifier_info, info); return fen4_info->fi->fib_type; } else if (info->family == AF_INET6) { fen6_info = container_of(info, struct fib6_entry_notifier_info, info); /* TODO: ECMP in ipv6 is several routes. * Every route has single nh. 
*/ return fen6_info->rt->fib6_type; } return RTN_UNSPEC; } /* Decided, that uc_nh route with key==nh is obviously neighbour route */ static bool prestera_fib_node_util_is_neighbour(struct prestera_fib_node *fib_node) { if (fib_node->info.type != PRESTERA_FIB_TYPE_UC_NH) return false; if (fib_node->info.nh_grp->nh_neigh_head[1].neigh) return false; if (!fib_node->info.nh_grp->nh_neigh_head[0].neigh) return false; if (memcmp(&fib_node->info.nh_grp->nh_neigh_head[0].neigh->key.addr, &fib_node->key.addr, sizeof(struct prestera_ip_addr))) return false; return true; } static int prestera_dev_if_type(const struct net_device *dev) { struct macvlan_dev *vlan; if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev))) { return PRESTERA_IF_VID_E; } else if (netif_is_bridge_master(dev)) { return PRESTERA_IF_VID_E; } else if (netif_is_lag_master(dev)) { return PRESTERA_IF_LAG_E; } else if (netif_is_macvlan(dev)) { vlan = netdev_priv(dev); return prestera_dev_if_type(vlan->lowerdev); } else { return PRESTERA_IF_PORT_E; } } static int prestera_neigh_iface_init(struct prestera_switch *sw, struct prestera_iface *iface, struct neighbour *n) { struct prestera_port *port; iface->vlan_id = 0; /* TODO: vlan egress */ iface->type = prestera_dev_if_type(n->dev); if (iface->type != PRESTERA_IF_PORT_E) return -EINVAL; if (!prestera_netdev_check(n->dev)) return -EINVAL; port = netdev_priv(n->dev); iface->dev_port.hw_dev_num = port->dev_id; iface->dev_port.port_num = port->hw_id; return 0; } static struct prestera_kern_neigh_cache * prestera_kern_neigh_cache_find(struct prestera_switch *sw, struct prestera_kern_neigh_cache_key *key) { struct prestera_kern_neigh_cache *n_cache; n_cache = rhashtable_lookup_fast(&sw->router->kern_neigh_cache_ht, key, __prestera_kern_neigh_cache_ht_params); return n_cache; } static void __prestera_kern_neigh_cache_destruct(struct prestera_switch *sw, struct prestera_kern_neigh_cache *n_cache) { dev_put(n_cache->key.dev); } static void __prestera_kern_neigh_cache_destroy(struct prestera_switch *sw, struct prestera_kern_neigh_cache *n_cache) { rhashtable_remove_fast(&sw->router->kern_neigh_cache_ht, &n_cache->ht_node, __prestera_kern_neigh_cache_ht_params); __prestera_kern_neigh_cache_destruct(sw, n_cache); kfree(n_cache); } static struct prestera_kern_neigh_cache * __prestera_kern_neigh_cache_create(struct prestera_switch *sw, struct prestera_kern_neigh_cache_key *key) { struct prestera_kern_neigh_cache *n_cache; int err; n_cache = kzalloc(sizeof(*n_cache), GFP_KERNEL); if (!n_cache) goto err_kzalloc; memcpy(&n_cache->key, key, sizeof(*key)); dev_hold(n_cache->key.dev); INIT_LIST_HEAD(&n_cache->kern_fib_cache_list); err = rhashtable_insert_fast(&sw->router->kern_neigh_cache_ht, &n_cache->ht_node, __prestera_kern_neigh_cache_ht_params); if (err) goto err_ht_insert; return n_cache; err_ht_insert: dev_put(n_cache->key.dev); kfree(n_cache); err_kzalloc: return NULL; } static struct prestera_kern_neigh_cache * prestera_kern_neigh_cache_get(struct prestera_switch *sw, struct prestera_kern_neigh_cache_key *key) { struct prestera_kern_neigh_cache *n_cache; n_cache = prestera_kern_neigh_cache_find(sw, key); if (!n_cache) n_cache = __prestera_kern_neigh_cache_create(sw, key); return n_cache; } static struct prestera_kern_neigh_cache * prestera_kern_neigh_cache_put(struct prestera_switch *sw, struct prestera_kern_neigh_cache *n_cache) { if (!n_cache->in_kernel && list_empty(&n_cache->kern_fib_cache_list)) { __prestera_kern_neigh_cache_destroy(sw, n_cache); return NULL; } return n_cache; 
} static struct prestera_kern_fib_cache * prestera_kern_fib_cache_find(struct prestera_switch *sw, struct prestera_kern_fib_cache_key *key) { struct prestera_kern_fib_cache *fib_cache; fib_cache = rhashtable_lookup_fast(&sw->router->kern_fib_cache_ht, key, __prestera_kern_fib_cache_ht_params); return fib_cache; } static void __prestera_kern_fib_cache_destruct(struct prestera_switch *sw, struct prestera_kern_fib_cache *fib_cache) { struct prestera_kern_neigh_cache *n_cache; int i; for (i = 0; i < PRESTERA_NHGR_SIZE_MAX; i++) { n_cache = fib_cache->kern_neigh_cache_head[i].n_cache; if (n_cache) { list_del(&fib_cache->kern_neigh_cache_head[i].head); prestera_kern_neigh_cache_put(sw, n_cache); } } fib_info_put(fib_cache->fen4_info.fi); } static void prestera_kern_fib_cache_destroy(struct prestera_switch *sw, struct prestera_kern_fib_cache *fib_cache) { rhashtable_remove_fast(&sw->router->kern_fib_cache_ht, &fib_cache->ht_node, __prestera_kern_fib_cache_ht_params); __prestera_kern_fib_cache_destruct(sw, fib_cache); kfree(fib_cache); } static int __prestera_kern_fib_cache_create_nhs(struct prestera_switch *sw, struct prestera_kern_fib_cache *fc) { struct prestera_kern_neigh_cache_key nc_key; struct prestera_kern_neigh_cache *n_cache; struct fib_nh_common *nhc; int i, nhs, err; if (!prestera_fib_info_is_nh(&fc->info)) return 0; nhs = prestera_kern_fib_info_nhs(&fc->info); if (nhs > PRESTERA_NHGR_SIZE_MAX) return 0; for (i = 0; i < nhs; i++) { nhc = prestera_kern_fib_info_nhc(&fc->fen4_info.info, i); err = prestera_util_nhc2nc_key(sw, nhc, &nc_key); if (err) return 0; n_cache = prestera_kern_neigh_cache_get(sw, &nc_key); if (!n_cache) return 0; fc->kern_neigh_cache_head[i].this = fc; fc->kern_neigh_cache_head[i].n_cache = n_cache; list_add(&fc->kern_neigh_cache_head[i].head, &n_cache->kern_fib_cache_list); } return 0; } /* Operations on fi (offload, etc) must be wrapped in utils. * This function just create storage. 
*/ static struct prestera_kern_fib_cache * prestera_kern_fib_cache_create(struct prestera_switch *sw, struct prestera_kern_fib_cache_key *key, struct fib_notifier_info *info) { struct fib_entry_notifier_info *fen_info = container_of(info, struct fib_entry_notifier_info, info); struct prestera_kern_fib_cache *fib_cache; int err; fib_cache = kzalloc(sizeof(*fib_cache), GFP_KERNEL); if (!fib_cache) goto err_kzalloc; memcpy(&fib_cache->key, key, sizeof(*key)); fib_info_hold(fen_info->fi); memcpy(&fib_cache->fen4_info, fen_info, sizeof(*fen_info)); err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht, &fib_cache->ht_node, __prestera_kern_fib_cache_ht_params); if (err) goto err_ht_insert; /* Handle nexthops */ err = __prestera_kern_fib_cache_create_nhs(sw, fib_cache); if (err) goto out; /* Not critical */ out: return fib_cache; err_ht_insert: fib_info_put(fen_info->fi); kfree(fib_cache); err_kzalloc: return NULL; } static void __prestera_k_arb_fib_nh_offload_set(struct prestera_switch *sw, struct prestera_kern_fib_cache *fibc, struct prestera_kern_neigh_cache *nc, bool offloaded, bool trap) { struct fib_nh_common *nhc; int i, nhs; nhs = prestera_kern_fib_info_nhs(&fibc->info); for (i = 0; i < nhs; i++) { nhc = prestera_kern_fib_info_nhc(&fibc->info, i); if (!nc) { prestera_util_kern_set_nh_offload(nhc, offloaded, trap); continue; } if (prestera_util_nhc_eq_n_cache_key(sw, nhc, &nc->key)) { prestera_util_kern_set_nh_offload(nhc, offloaded, trap); break; } } } static void __prestera_k_arb_n_offload_set(struct prestera_switch *sw, struct prestera_kern_neigh_cache *nc, bool offloaded) { struct neighbour *n; n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4, nc->key.dev); if (!n) return; prestera_util_kern_set_neigh_offload(n, offloaded); neigh_release(n); } static void __prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw, struct prestera_kern_fib_cache *fc, bool fail, bool offload, bool trap) { struct fib_rt_info fri; switch (fc->key.addr.v) { case PRESTERA_IPV4: fri.fi = fc->fen4_info.fi; fri.tb_id = fc->key.kern_tb_id; fri.dst = fc->key.addr.u.ipv4; fri.dst_len = fc->key.prefix_len; fri.dscp = fc->fen4_info.dscp; fri.type = fc->fen4_info.type; /* flags begin */ fri.offload = offload; fri.trap = trap; fri.offload_failed = fail; /* flags end */ fib_alias_hw_flags_set(&init_net, &fri); return; case PRESTERA_IPV6: /* TODO */ return; } } static void __prestera_k_arb_n_lpm_set(struct prestera_switch *sw, struct prestera_kern_neigh_cache *n_cache, bool enabled) { struct prestera_nexthop_group_key nh_grp_key; struct prestera_kern_fib_cache_key fc_key; struct prestera_kern_fib_cache *fib_cache; struct prestera_fib_node *fib_node; struct prestera_fib_key fib_key; /* Exception for fc with prefix 32: LPM entry is already used by fib */ memset(&fc_key, 0, sizeof(fc_key)); fc_key.addr = n_cache->key.addr; fc_key.prefix_len = PRESTERA_IP_ADDR_PLEN(n_cache->key.addr.v); /* But better to use tb_id of route, which pointed to this neighbour. */ /* We take it from rif, because rif inconsistent. * Must be separated in_rif and out_rif. 
* Also note: for each fib pointed to this neigh should be separated * neigh lpm entry (for each ingress vr) */ fc_key.kern_tb_id = l3mdev_fib_table(n_cache->key.dev); fib_cache = prestera_kern_fib_cache_find(sw, &fc_key); memset(&fib_key, 0, sizeof(fib_key)); fib_key.addr = n_cache->key.addr; fib_key.prefix_len = PRESTERA_IP_ADDR_PLEN(n_cache->key.addr.v); fib_key.tb_id = prestera_fix_tb_id(fc_key.kern_tb_id); fib_node = prestera_fib_node_find(sw, &fib_key); if (!fib_cache || !fib_cache->reachable) { if (!enabled && fib_node) { if (prestera_fib_node_util_is_neighbour(fib_node)) prestera_fib_node_destroy(sw, fib_node); return; } } if (enabled && !fib_node) { memset(&nh_grp_key, 0, sizeof(nh_grp_key)); prestera_util_nc_key2nh_key(&n_cache->key, &nh_grp_key.neigh[0]); fib_node = prestera_fib_node_create(sw, &fib_key, PRESTERA_FIB_TYPE_UC_NH, &nh_grp_key); if (!fib_node) pr_err("%s failed ip=%pI4n", "prestera_fib_node_create", &fib_key.addr.u.ipv4); return; } } static void __prestera_k_arb_nc_kern_fib_fetch(struct prestera_switch *sw, struct prestera_kern_neigh_cache *nc) { if (prestera_util_kern_n_is_reachable(l3mdev_fib_table(nc->key.dev), &nc->key.addr, nc->key.dev)) nc->reachable = true; else nc->reachable = false; } /* Kernel neighbour -> neigh_cache info */ static void __prestera_k_arb_nc_kern_n_fetch(struct prestera_switch *sw, struct prestera_kern_neigh_cache *nc) { struct neighbour *n; int err; memset(&nc->nh_neigh_info, 0, sizeof(nc->nh_neigh_info)); n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4, nc->key.dev); if (!n) goto out; read_lock_bh(&n->lock); if (n->nud_state & NUD_VALID && !n->dead) { err = prestera_neigh_iface_init(sw, &nc->nh_neigh_info.iface, n); if (err) goto n_read_out; memcpy(&nc->nh_neigh_info.ha[0], &n->ha[0], ETH_ALEN); nc->nh_neigh_info.connected = true; } n_read_out: read_unlock_bh(&n->lock); out: nc->in_kernel = nc->nh_neigh_info.connected; if (n) neigh_release(n); } /* neigh_cache info -> lpm update */ static void __prestera_k_arb_nc_apply(struct prestera_switch *sw, struct prestera_kern_neigh_cache *nc) { struct prestera_kern_neigh_cache_head *nhead; struct prestera_nh_neigh_key nh_key; struct prestera_nh_neigh *nh_neigh; int err; __prestera_k_arb_n_lpm_set(sw, nc, nc->reachable && nc->in_kernel); __prestera_k_arb_n_offload_set(sw, nc, nc->reachable && nc->in_kernel); prestera_util_nc_key2nh_key(&nc->key, &nh_key); nh_neigh = prestera_nh_neigh_find(sw, &nh_key); if (!nh_neigh) goto out; /* Do hw update only if something changed to prevent nh flap */ if (memcmp(&nc->nh_neigh_info, &nh_neigh->info, sizeof(nh_neigh->info))) { memcpy(&nh_neigh->info, &nc->nh_neigh_info, sizeof(nh_neigh->info)); err = prestera_nh_neigh_set(sw, nh_neigh); if (err) { pr_err("%s failed with err=%d ip=%pI4n mac=%pM", "prestera_nh_neigh_set", err, &nh_neigh->key.addr.u.ipv4, &nh_neigh->info.ha[0]); goto out; } } out: list_for_each_entry(nhead, &nc->kern_fib_cache_list, head) { __prestera_k_arb_fib_nh_offload_set(sw, nhead->this, nc, nc->in_kernel, !nc->in_kernel); } } static int __prestera_pr_k_arb_fc_lpm_info_calc(struct prestera_switch *sw, struct prestera_kern_fib_cache *fc) { struct fib_nh_common *nhc; int nh_cnt; memset(&fc->lpm_info, 0, sizeof(fc->lpm_info)); switch (prestera_kern_fib_info_type(&fc->info)) { case RTN_UNICAST: if (prestera_fib_info_is_direct(&fc->info) && fc->key.prefix_len == PRESTERA_IP_ADDR_PLEN(fc->key.addr.v)) { /* This is special case. * When prefix is 32. 
Than we will have conflict in lpm * for direct route - once TRAP added, there is no * place for neighbour entry. So represent direct route * with prefix 32, as NH. So neighbour will be resolved * as nexthop of this route. */ nhc = prestera_kern_fib_info_nhc(&fc->info, 0); fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_UC_NH; fc->lpm_info.nh_grp_key.neigh[0].addr = fc->key.addr; fc->lpm_info.nh_grp_key.neigh[0].rif = nhc->nhc_dev; break; } /* We can also get nh_grp_key from fi. This will be correct to * because cache not always represent, what actually written to * lpm. But we use nh cache, as well for now (for this case). */ for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) { if (!fc->kern_neigh_cache_head[nh_cnt].n_cache) break; fc->lpm_info.nh_grp_key.neigh[nh_cnt].addr = fc->kern_neigh_cache_head[nh_cnt].n_cache->key.addr; fc->lpm_info.nh_grp_key.neigh[nh_cnt].rif = fc->kern_neigh_cache_head[nh_cnt].n_cache->key.dev; } fc->lpm_info.fib_type = nh_cnt ? PRESTERA_FIB_TYPE_UC_NH : PRESTERA_FIB_TYPE_TRAP; break; /* Unsupported. Leave it for kernel: */ case RTN_BROADCAST: case RTN_MULTICAST: /* Routes we must trap by design: */ case RTN_LOCAL: case RTN_UNREACHABLE: case RTN_PROHIBIT: fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP; break; case RTN_BLACKHOLE: fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_DROP; break; default: dev_err(sw->dev->dev, "Unsupported fib_type"); return -EOPNOTSUPP; } fc->lpm_info.fib_key.addr = fc->key.addr; fc->lpm_info.fib_key.prefix_len = fc->key.prefix_len; fc->lpm_info.fib_key.tb_id = prestera_fix_tb_id(fc->key.kern_tb_id); return 0; } static int __prestera_k_arb_f_lpm_set(struct prestera_switch *sw, struct prestera_kern_fib_cache *fc, bool enabled) { struct prestera_fib_node *fib_node; fib_node = prestera_fib_node_find(sw, &fc->lpm_info.fib_key); if (fib_node) prestera_fib_node_destroy(sw, fib_node); if (!enabled) return 0; fib_node = prestera_fib_node_create(sw, &fc->lpm_info.fib_key, fc->lpm_info.fib_type, &fc->lpm_info.nh_grp_key); if (!fib_node) { dev_err(sw->dev->dev, "fib_node=NULL %pI4n/%d kern_tb_id = %d", &fc->key.addr.u.ipv4, fc->key.prefix_len, fc->key.kern_tb_id); return -ENOENT; } return 0; } static int __prestera_k_arb_fc_apply(struct prestera_switch *sw, struct prestera_kern_fib_cache *fc) { int err; err = __prestera_pr_k_arb_fc_lpm_info_calc(sw, fc); if (err) return err; err = __prestera_k_arb_f_lpm_set(sw, fc, fc->reachable); if (err) { __prestera_k_arb_fib_lpm_offload_set(sw, fc, true, false, false); return err; } switch (fc->lpm_info.fib_type) { case PRESTERA_FIB_TYPE_UC_NH: __prestera_k_arb_fib_lpm_offload_set(sw, fc, false, fc->reachable, false); break; case PRESTERA_FIB_TYPE_TRAP: __prestera_k_arb_fib_lpm_offload_set(sw, fc, false, false, fc->reachable); break; case PRESTERA_FIB_TYPE_DROP: __prestera_k_arb_fib_lpm_offload_set(sw, fc, false, true, fc->reachable); break; case PRESTERA_FIB_TYPE_INVALID: break; } return 0; } static struct prestera_kern_fib_cache * __prestera_k_arb_util_fib_overlaps(struct prestera_switch *sw, struct prestera_kern_fib_cache *fc) { struct prestera_kern_fib_cache_key fc_key; struct prestera_kern_fib_cache *rfc; /* TODO: parse kernel rules */ rfc = NULL; if (fc->key.kern_tb_id == RT_TABLE_LOCAL) { memcpy(&fc_key, &fc->key, sizeof(fc_key)); fc_key.kern_tb_id = RT_TABLE_MAIN; rfc = prestera_kern_fib_cache_find(sw, &fc_key); } return rfc; } static struct prestera_kern_fib_cache * __prestera_k_arb_util_fib_overlapped(struct prestera_switch *sw, struct prestera_kern_fib_cache *fc) { struct prestera_kern_fib_cache_key 
fc_key; struct prestera_kern_fib_cache *rfc; /* TODO: parse kernel rules */ rfc = NULL; if (fc->key.kern_tb_id == RT_TABLE_MAIN) { memcpy(&fc_key, &fc->key, sizeof(fc_key)); fc_key.kern_tb_id = RT_TABLE_LOCAL; rfc = prestera_kern_fib_cache_find(sw, &fc_key); } return rfc; } static void __prestera_k_arb_hw_state_upd(struct prestera_switch *sw, struct prestera_kern_neigh_cache *nc) { struct prestera_nh_neigh_key nh_key; struct prestera_nh_neigh *nh_neigh; struct neighbour *n; bool hw_active; prestera_util_nc_key2nh_key(&nc->key, &nh_key); nh_neigh = prestera_nh_neigh_find(sw, &nh_key); if (!nh_neigh) { pr_err("Cannot find nh_neigh for cached %pI4n", &nc->key.addr.u.ipv4); return; } hw_active = prestera_nh_neigh_util_hw_state(sw, nh_neigh); #ifdef PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH if (!hw_active && nc->in_kernel) goto out; #else /* PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH */ if (!hw_active) goto out; #endif /* PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH */ if (nc->key.addr.v == PRESTERA_IPV4) { n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4, nc->key.dev); if (!n) n = neigh_create(&arp_tbl, &nc->key.addr.u.ipv4, nc->key.dev); } else { n = NULL; } if (!IS_ERR(n) && n) { neigh_event_send(n, NULL); neigh_release(n); } else { pr_err("Cannot create neighbour %pI4n", &nc->key.addr.u.ipv4); } out: return; } /* Propagate hw state to kernel */ static void prestera_k_arb_hw_evt(struct prestera_switch *sw) { struct prestera_kern_neigh_cache *n_cache; struct rhashtable_iter iter; rhashtable_walk_enter(&sw->router->kern_neigh_cache_ht, &iter); rhashtable_walk_start(&iter); while (1) { n_cache = rhashtable_walk_next(&iter); if (!n_cache) break; if (IS_ERR(n_cache)) continue; rhashtable_walk_stop(&iter); __prestera_k_arb_hw_state_upd(sw, n_cache); rhashtable_walk_start(&iter); } rhashtable_walk_stop(&iter); rhashtable_walk_exit(&iter); } /* Propagate kernel event to hw */ static void prestera_k_arb_n_evt(struct prestera_switch *sw, struct neighbour *n) { struct prestera_kern_neigh_cache_key n_key; struct prestera_kern_neigh_cache *n_cache; int err; err = prestera_util_neigh2nc_key(sw, n, &n_key); if (err) return; n_cache = prestera_kern_neigh_cache_find(sw, &n_key); if (!n_cache) { n_cache = prestera_kern_neigh_cache_get(sw, &n_key); if (!n_cache) return; __prestera_k_arb_nc_kern_fib_fetch(sw, n_cache); } __prestera_k_arb_nc_kern_n_fetch(sw, n_cache); __prestera_k_arb_nc_apply(sw, n_cache); prestera_kern_neigh_cache_put(sw, n_cache); } static void __prestera_k_arb_fib_evt2nc(struct prestera_switch *sw) { struct prestera_kern_neigh_cache *n_cache; struct rhashtable_iter iter; rhashtable_walk_enter(&sw->router->kern_neigh_cache_ht, &iter); rhashtable_walk_start(&iter); while (1) { n_cache = rhashtable_walk_next(&iter); if (!n_cache) break; if (IS_ERR(n_cache)) continue; rhashtable_walk_stop(&iter); __prestera_k_arb_nc_kern_fib_fetch(sw, n_cache); __prestera_k_arb_nc_apply(sw, n_cache); rhashtable_walk_start(&iter); } rhashtable_walk_stop(&iter); rhashtable_walk_exit(&iter); } static int prestera_k_arb_fib_evt(struct prestera_switch *sw, bool replace, /* replace or del */ struct fib_notifier_info *info) { struct prestera_kern_fib_cache *tfib_cache, *bfib_cache; /* top/btm */ struct prestera_kern_fib_cache_key fc_key; struct prestera_kern_fib_cache *fib_cache; int err; prestera_util_fen_info2fib_cache_key(info, &fc_key); fib_cache = prestera_kern_fib_cache_find(sw, &fc_key); if (fib_cache) { fib_cache->reachable = false; err = __prestera_k_arb_fc_apply(sw, fib_cache); if (err) dev_err(sw->dev->dev, "Applying 
destroyed fib_cache failed"); bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache); tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache); if (!tfib_cache && bfib_cache) { bfib_cache->reachable = true; err = __prestera_k_arb_fc_apply(sw, bfib_cache); if (err) dev_err(sw->dev->dev, "Applying fib_cache btm failed"); } prestera_kern_fib_cache_destroy(sw, fib_cache); } if (replace) { fib_cache = prestera_kern_fib_cache_create(sw, &fc_key, info); if (!fib_cache) { dev_err(sw->dev->dev, "fib_cache == NULL"); return -ENOENT; } bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache); tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache); if (!tfib_cache) fib_cache->reachable = true; if (bfib_cache) { bfib_cache->reachable = false; err = __prestera_k_arb_fc_apply(sw, bfib_cache); if (err) dev_err(sw->dev->dev, "Applying fib_cache btm failed"); } err = __prestera_k_arb_fc_apply(sw, fib_cache); if (err) dev_err(sw->dev->dev, "Applying fib_cache failed"); } /* Update all neighs to resolve overlapped and apply related */ __prestera_k_arb_fib_evt2nc(sw); return 0; } static void __prestera_k_arb_abort_neigh_ht_cb(void *ptr, void *arg) { struct prestera_kern_neigh_cache *n_cache = ptr; struct prestera_switch *sw = arg; if (!list_empty(&n_cache->kern_fib_cache_list)) { WARN_ON(1); /* BUG */ return; } __prestera_k_arb_n_offload_set(sw, n_cache, false); n_cache->in_kernel = false; /* No need to destroy lpm. * It will be aborted by destroy_ht */ __prestera_kern_neigh_cache_destruct(sw, n_cache); kfree(n_cache); } static void __prestera_k_arb_abort_fib_ht_cb(void *ptr, void *arg) { struct prestera_kern_fib_cache *fib_cache = ptr; struct prestera_switch *sw = arg; __prestera_k_arb_fib_lpm_offload_set(sw, fib_cache, false, false, false); __prestera_k_arb_fib_nh_offload_set(sw, fib_cache, NULL, false, false); /* No need to destroy lpm. * It will be aborted by destroy_ht */ __prestera_kern_fib_cache_destruct(sw, fib_cache); kfree(fib_cache); } static void prestera_k_arb_abort(struct prestera_switch *sw) { /* Function to remove all arbiter entries and related hw objects. */ /* Sequence: * 1) Clear arbiter tables, but don't touch hw * 2) Clear hw * We use such approach, because arbiter object is not directly mapped * to hw. So deletion of one arbiter object may even lead to creation of * hw object (e.g. in case of overlapped routes). 
*/ rhashtable_free_and_destroy(&sw->router->kern_fib_cache_ht, __prestera_k_arb_abort_fib_ht_cb, sw); rhashtable_free_and_destroy(&sw->router->kern_neigh_cache_ht, __prestera_k_arb_abort_neigh_ht_cb, sw); } static int __prestera_inetaddr_port_event(struct net_device *port_dev, unsigned long event, struct netlink_ext_ack *extack) { struct prestera_port *port = netdev_priv(port_dev); struct prestera_rif_entry_key re_key = {}; struct prestera_rif_entry *re; u32 kern_tb_id; int err; err = prestera_is_valid_mac_addr(port, port_dev->dev_addr); if (err) { NL_SET_ERR_MSG_MOD(extack, "RIF MAC must have the same prefix"); return err; } kern_tb_id = l3mdev_fib_table(port_dev); re_key.iface.type = PRESTERA_IF_PORT_E; re_key.iface.dev_port.hw_dev_num = port->dev_id; re_key.iface.dev_port.port_num = port->hw_id; re = prestera_rif_entry_find(port->sw, &re_key); switch (event) { case NETDEV_UP: if (re) { NL_SET_ERR_MSG_MOD(extack, "RIF already exist"); return -EEXIST; } re = prestera_rif_entry_create(port->sw, &re_key, prestera_fix_tb_id(kern_tb_id), port_dev->dev_addr); if (!re) { NL_SET_ERR_MSG_MOD(extack, "Can't create RIF"); return -EINVAL; } dev_hold(port_dev); break; case NETDEV_DOWN: if (!re) { NL_SET_ERR_MSG_MOD(extack, "Can't find RIF"); return -EEXIST; } prestera_rif_entry_destroy(port->sw, re); dev_put(port_dev); break; } return 0; } static int __prestera_inetaddr_event(struct prestera_switch *sw, struct net_device *dev, unsigned long event, struct netlink_ext_ack *extack) { if (!prestera_netdev_check(dev) || netif_is_any_bridge_port(dev) || netif_is_lag_port(dev)) return 0; return __prestera_inetaddr_port_event(dev, event, extack); } static int __prestera_inetaddr_cb(struct notifier_block *nb, unsigned long event, void *ptr) { struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; struct net_device *dev = ifa->ifa_dev->dev; struct prestera_router *router = container_of(nb, struct prestera_router, inetaddr_nb); struct in_device *idev; int err = 0; if (event != NETDEV_DOWN) goto out; /* Ignore if this is not latest address */ idev = __in_dev_get_rtnl(dev); if (idev && idev->ifa_list) goto out; err = __prestera_inetaddr_event(router->sw, dev, event, NULL); out: return notifier_from_errno(err); } static int __prestera_inetaddr_valid_cb(struct notifier_block *nb, unsigned long event, void *ptr) { struct in_validator_info *ivi = (struct in_validator_info *)ptr; struct net_device *dev = ivi->ivi_dev->dev; struct prestera_router *router = container_of(nb, struct prestera_router, inetaddr_valid_nb); struct in_device *idev; int err = 0; if (event != NETDEV_UP) goto out; /* Ignore if this is not first address */ idev = __in_dev_get_rtnl(dev); if (idev && idev->ifa_list) goto out; if (ipv4_is_multicast(ivi->ivi_addr)) { NL_SET_ERR_MSG_MOD(ivi->extack, "Multicast addr on RIF is not supported"); err = -EINVAL; goto out; } err = __prestera_inetaddr_event(router->sw, dev, event, ivi->extack); out: return notifier_from_errno(err); } struct prestera_fib_event_work { struct work_struct work; struct prestera_switch *sw; struct fib_entry_notifier_info fen_info; unsigned long event; }; static void __prestera_router_fib_event_work(struct work_struct *work) { struct prestera_fib_event_work *fib_work = container_of(work, struct prestera_fib_event_work, work); struct prestera_switch *sw = fib_work->sw; int err; rtnl_lock(); switch (fib_work->event) { case FIB_EVENT_ENTRY_REPLACE: err = prestera_k_arb_fib_evt(sw, true, &fib_work->fen_info.info); if (err) goto err_out; break; case FIB_EVENT_ENTRY_DEL: err = 
prestera_k_arb_fib_evt(sw, false, &fib_work->fen_info.info); if (err) goto err_out; break; } goto out; err_out: dev_err(sw->dev->dev, "Error when processing %pI4h/%d", &fib_work->fen_info.dst, fib_work->fen_info.dst_len); out: fib_info_put(fib_work->fen_info.fi); rtnl_unlock(); kfree(fib_work); } /* Called with rcu_read_lock() */ static int __prestera_router_fib_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct prestera_fib_event_work *fib_work; struct fib_entry_notifier_info *fen_info; struct fib_notifier_info *info = ptr; struct prestera_router *router; if (info->family != AF_INET) return NOTIFY_DONE; router = container_of(nb, struct prestera_router, fib_nb); switch (event) { case FIB_EVENT_ENTRY_REPLACE: case FIB_EVENT_ENTRY_DEL: fen_info = container_of(info, struct fib_entry_notifier_info, info); if (!fen_info->fi) return NOTIFY_DONE; fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); if (WARN_ON(!fib_work)) return NOTIFY_BAD; fib_info_hold(fen_info->fi); fib_work->fen_info = *fen_info; fib_work->event = event; fib_work->sw = router->sw; INIT_WORK(&fib_work->work, __prestera_router_fib_event_work); prestera_queue_work(&fib_work->work); break; default: return NOTIFY_DONE; } return NOTIFY_DONE; } struct prestera_netevent_work { struct work_struct work; struct prestera_switch *sw; struct neighbour *n; }; static void prestera_router_neigh_event_work(struct work_struct *work) { struct prestera_netevent_work *net_work = container_of(work, struct prestera_netevent_work, work); struct prestera_switch *sw = net_work->sw; struct neighbour *n = net_work->n; /* neigh - its not hw related object. It stored only in kernel. So... */ rtnl_lock(); prestera_k_arb_n_evt(sw, n); neigh_release(n); rtnl_unlock(); kfree(net_work); } static int prestera_router_netevent_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct prestera_netevent_work *net_work; struct prestera_router *router; struct neighbour *n = ptr; router = container_of(nb, struct prestera_router, netevent_nb); switch (event) { case NETEVENT_NEIGH_UPDATE: if (n->tbl->family != AF_INET) return NOTIFY_DONE; net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC); if (WARN_ON(!net_work)) return NOTIFY_BAD; neigh_clone(n); net_work->n = n; net_work->sw = router->sw; INIT_WORK(&net_work->work, prestera_router_neigh_event_work); prestera_queue_work(&net_work->work); } return NOTIFY_DONE; } static void prestera_router_update_neighs_work(struct work_struct *work) { struct prestera_router *router; router = container_of(work, struct prestera_router, neighs_update.dw.work); rtnl_lock(); prestera_k_arb_hw_evt(router->sw); rtnl_unlock(); prestera_queue_delayed_work(&router->neighs_update.dw, msecs_to_jiffies(PRESTERA_NH_PROBE_INTERVAL)); } static int prestera_neigh_work_init(struct prestera_switch *sw) { INIT_DELAYED_WORK(&sw->router->neighs_update.dw, prestera_router_update_neighs_work); prestera_queue_delayed_work(&sw->router->neighs_update.dw, 0); return 0; } static void prestera_neigh_work_fini(struct prestera_switch *sw) { cancel_delayed_work_sync(&sw->router->neighs_update.dw); } int prestera_router_init(struct prestera_switch *sw) { struct prestera_router *router; int err, nhgrp_cache_bytes; router = kzalloc(sizeof(*sw->router), GFP_KERNEL); if (!router) return -ENOMEM; sw->router = router; router->sw = sw; err = prestera_router_hw_init(sw); if (err) goto err_router_lib_init; err = rhashtable_init(&router->kern_fib_cache_ht, &__prestera_kern_fib_cache_ht_params); if (err) goto err_kern_fib_cache_ht_init; err = 
rhashtable_init(&router->kern_neigh_cache_ht, &__prestera_kern_neigh_cache_ht_params); if (err) goto err_kern_neigh_cache_ht_init; nhgrp_cache_bytes = sw->size_tbl_router_nexthop / 8 + 1; router->nhgrp_hw_state_cache = kzalloc(nhgrp_cache_bytes, GFP_KERNEL); if (!router->nhgrp_hw_state_cache) { err = -ENOMEM; goto err_nh_state_cache_alloc; } err = prestera_neigh_work_init(sw); if (err) goto err_neigh_work_init; router->inetaddr_valid_nb.notifier_call = __prestera_inetaddr_valid_cb; err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb); if (err) goto err_register_inetaddr_validator_notifier; router->inetaddr_nb.notifier_call = __prestera_inetaddr_cb; err = register_inetaddr_notifier(&router->inetaddr_nb); if (err) goto err_register_inetaddr_notifier; router->netevent_nb.notifier_call = prestera_router_netevent_event; err = register_netevent_notifier(&router->netevent_nb); if (err) goto err_register_netevent_notifier; router->fib_nb.notifier_call = __prestera_router_fib_event; err = register_fib_notifier(&init_net, &router->fib_nb, /* TODO: flush fib entries */ NULL, NULL); if (err) goto err_register_fib_notifier; return 0; err_register_fib_notifier: unregister_netevent_notifier(&router->netevent_nb); err_register_netevent_notifier: unregister_inetaddr_notifier(&router->inetaddr_nb); err_register_inetaddr_notifier: unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb); err_register_inetaddr_validator_notifier: prestera_neigh_work_fini(sw); err_neigh_work_init: kfree(router->nhgrp_hw_state_cache); err_nh_state_cache_alloc: rhashtable_destroy(&router->kern_neigh_cache_ht); err_kern_neigh_cache_ht_init: rhashtable_destroy(&router->kern_fib_cache_ht); err_kern_fib_cache_ht_init: prestera_router_hw_fini(sw); err_router_lib_init: kfree(sw->router); return err; } void prestera_router_fini(struct prestera_switch *sw) { unregister_fib_notifier(&init_net, &sw->router->fib_nb); unregister_netevent_notifier(&sw->router->netevent_nb); unregister_inetaddr_notifier(&sw->router->inetaddr_nb); unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb); prestera_neigh_work_fini(sw); prestera_queue_drain(); prestera_k_arb_abort(sw); kfree(sw->router->nhgrp_hw_state_cache); rhashtable_destroy(&sw->router->kern_fib_cache_ht); prestera_router_hw_fini(sw); kfree(sw->router); sw->router = NULL; }
linux-master
drivers/net/ethernet/marvell/prestera/prestera_router.c
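The router file above defers FIB and neighbour notifier events, which fire in a restricted (atomic/RCU) context, to a work item that copies the payload, holds a reference, and then does the real processing under rtnl_lock. Below is a minimal, self-contained userspace sketch of that defer-and-drain pattern; all names in it (fib_event, fib_notifier, fib_worker, big_lock) are illustrative and are not driver or kernel API.

/* Sketch only: notifier copies the event and enqueues it; a worker later
 * processes it under the heavy lock and frees it.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fib_event {                      /* plays the role of the *_event_work struct */
    struct fib_event *next;
    char dst[32];                       /* private copy of the notifier payload */
    int dst_len;
};

static struct fib_event *queue_head;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;   /* plays rtnl_lock */

/* "Notifier": may not block on big_lock here, so only copy and enqueue. */
static int fib_notifier(const char *dst, int dst_len)
{
    struct fib_event *ev = calloc(1, sizeof(*ev));

    if (!ev)
        return -1;
    snprintf(ev->dst, sizeof(ev->dst), "%s", dst);
    ev->dst_len = dst_len;

    pthread_mutex_lock(&queue_lock);
    ev->next = queue_head;
    queue_head = ev;
    pthread_mutex_unlock(&queue_lock);
    return 0;
}

/* "Worker": drains the queue and processes each event under the heavy lock. */
static void fib_worker(void)
{
    struct fib_event *ev;

    pthread_mutex_lock(&queue_lock);
    ev = queue_head;
    queue_head = NULL;
    pthread_mutex_unlock(&queue_lock);

    while (ev) {
        struct fib_event *next = ev->next;

        pthread_mutex_lock(&big_lock);
        printf("programming route %s/%d\n", ev->dst, ev->dst_len);
        pthread_mutex_unlock(&big_lock);
        free(ev);
        ev = next;
    }
}

int main(void)
{
    fib_notifier("192.168.0.0", 24);
    fib_notifier("10.0.0.0", 8);
    fib_worker();
    return 0;
}

Unlike the driver, this sketch drains in LIFO order and in the same thread; the point is only the copy, defer, lock, process, free sequence.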
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /* Copyright (c) 2021 Marvell International Ltd. All rights reserved */ #include "prestera.h" #include "prestera_hw.h" #include "prestera_acl.h" #include "prestera_counter.h" #define COUNTER_POLL_TIME (msecs_to_jiffies(1000)) #define COUNTER_RESCHED_TIME (msecs_to_jiffies(50)) #define COUNTER_BULK_SIZE (256) struct prestera_counter { struct prestera_switch *sw; struct delayed_work stats_dw; struct mutex mtx; /* protect block_list */ struct prestera_counter_block **block_list; u32 total_read; u32 block_list_len; u32 curr_idx; bool is_fetching; }; struct prestera_counter_block { struct list_head list; u32 id; u32 offset; u32 num_counters; u32 client; struct idr counter_idr; refcount_t refcnt; struct mutex mtx; /* protect stats and counter_idr */ struct prestera_counter_stats *stats; u8 *counter_flag; bool is_updating; bool full; }; enum { COUNTER_FLAG_READY = 0, COUNTER_FLAG_INVALID = 1 }; static bool prestera_counter_is_ready(struct prestera_counter_block *block, u32 id) { return block->counter_flag[id - block->offset] == COUNTER_FLAG_READY; } static void prestera_counter_lock(struct prestera_counter *counter) { mutex_lock(&counter->mtx); } static void prestera_counter_unlock(struct prestera_counter *counter) { mutex_unlock(&counter->mtx); } static void prestera_counter_block_lock(struct prestera_counter_block *block) { mutex_lock(&block->mtx); } static void prestera_counter_block_unlock(struct prestera_counter_block *block) { mutex_unlock(&block->mtx); } static bool prestera_counter_block_incref(struct prestera_counter_block *block) { return refcount_inc_not_zero(&block->refcnt); } static bool prestera_counter_block_decref(struct prestera_counter_block *block) { return refcount_dec_and_test(&block->refcnt); } /* must be called with prestera_counter_block_lock() */ static void prestera_counter_stats_clear(struct prestera_counter_block *block, u32 counter_id) { memset(&block->stats[counter_id - block->offset], 0, sizeof(*block->stats)); } static struct prestera_counter_block * prestera_counter_block_lookup_not_full(struct prestera_counter *counter, u32 client) { u32 i; prestera_counter_lock(counter); for (i = 0; i < counter->block_list_len; i++) { if (counter->block_list[i] && counter->block_list[i]->client == client && !counter->block_list[i]->full && prestera_counter_block_incref(counter->block_list[i])) { prestera_counter_unlock(counter); return counter->block_list[i]; } } prestera_counter_unlock(counter); return NULL; } static int prestera_counter_block_list_add(struct prestera_counter *counter, struct prestera_counter_block *block) { struct prestera_counter_block **arr; u32 i; prestera_counter_lock(counter); for (i = 0; i < counter->block_list_len; i++) { if (counter->block_list[i]) continue; counter->block_list[i] = block; prestera_counter_unlock(counter); return 0; } arr = krealloc(counter->block_list, (counter->block_list_len + 1) * sizeof(*counter->block_list), GFP_KERNEL); if (!arr) { prestera_counter_unlock(counter); return -ENOMEM; } counter->block_list = arr; counter->block_list[counter->block_list_len] = block; counter->block_list_len++; prestera_counter_unlock(counter); return 0; } static struct prestera_counter_block * prestera_counter_block_get(struct prestera_counter *counter, u32 client) { struct prestera_counter_block *block; int err; block = prestera_counter_block_lookup_not_full(counter, client); if (block) return block; block = kzalloc(sizeof(*block), GFP_KERNEL); if (!block) return ERR_PTR(-ENOMEM); err = 
prestera_hw_counter_block_get(counter->sw, client, &block->id, &block->offset, &block->num_counters); if (err) goto err_block; block->stats = kcalloc(block->num_counters, sizeof(*block->stats), GFP_KERNEL); if (!block->stats) { err = -ENOMEM; goto err_stats; } block->counter_flag = kcalloc(block->num_counters, sizeof(*block->counter_flag), GFP_KERNEL); if (!block->counter_flag) { err = -ENOMEM; goto err_flag; } block->client = client; mutex_init(&block->mtx); refcount_set(&block->refcnt, 1); idr_init_base(&block->counter_idr, block->offset); err = prestera_counter_block_list_add(counter, block); if (err) goto err_list_add; return block; err_list_add: idr_destroy(&block->counter_idr); mutex_destroy(&block->mtx); kfree(block->counter_flag); err_flag: kfree(block->stats); err_stats: prestera_hw_counter_block_release(counter->sw, block->id); err_block: kfree(block); return ERR_PTR(err); } static void prestera_counter_block_put(struct prestera_counter *counter, struct prestera_counter_block *block) { u32 i; if (!prestera_counter_block_decref(block)) return; prestera_counter_lock(counter); for (i = 0; i < counter->block_list_len; i++) { if (counter->block_list[i] && counter->block_list[i]->id == block->id) { counter->block_list[i] = NULL; break; } } prestera_counter_unlock(counter); WARN_ON(!idr_is_empty(&block->counter_idr)); prestera_hw_counter_block_release(counter->sw, block->id); idr_destroy(&block->counter_idr); mutex_destroy(&block->mtx); kfree(block->stats); kfree(block); } static int prestera_counter_get_vacant(struct prestera_counter_block *block, u32 *id) { int free_id; if (block->full) return -ENOSPC; prestera_counter_block_lock(block); free_id = idr_alloc_cyclic(&block->counter_idr, NULL, block->offset, block->offset + block->num_counters, GFP_KERNEL); if (free_id < 0) { if (free_id == -ENOSPC) block->full = true; prestera_counter_block_unlock(block); return free_id; } *id = free_id; prestera_counter_block_unlock(block); return 0; } int prestera_counter_get(struct prestera_counter *counter, u32 client, struct prestera_counter_block **bl, u32 *counter_id) { struct prestera_counter_block *block; int err; u32 id; get_next_block: block = prestera_counter_block_get(counter, client); if (IS_ERR(block)) return PTR_ERR(block); err = prestera_counter_get_vacant(block, &id); if (err) { prestera_counter_block_put(counter, block); if (err == -ENOSPC) goto get_next_block; return err; } prestera_counter_block_lock(block); if (block->is_updating) block->counter_flag[id - block->offset] = COUNTER_FLAG_INVALID; prestera_counter_block_unlock(block); *counter_id = id; *bl = block; return 0; } void prestera_counter_put(struct prestera_counter *counter, struct prestera_counter_block *block, u32 counter_id) { if (!block) return; prestera_counter_block_lock(block); idr_remove(&block->counter_idr, counter_id); block->full = false; prestera_counter_stats_clear(block, counter_id); prestera_counter_block_unlock(block); prestera_hw_counter_clear(counter->sw, block->id, counter_id); prestera_counter_block_put(counter, block); } static u32 prestera_counter_block_idx_next(struct prestera_counter *counter, u32 curr_idx) { u32 idx, i, start = curr_idx + 1; prestera_counter_lock(counter); for (i = 0; i < counter->block_list_len; i++) { idx = (start + i) % counter->block_list_len; if (!counter->block_list[idx]) continue; prestera_counter_unlock(counter); return idx; } prestera_counter_unlock(counter); return 0; } static struct prestera_counter_block * prestera_counter_block_get_by_idx(struct prestera_counter 
*counter, u32 idx) { if (idx >= counter->block_list_len) return NULL; prestera_counter_lock(counter); if (!counter->block_list[idx] || !prestera_counter_block_incref(counter->block_list[idx])) { prestera_counter_unlock(counter); return NULL; } prestera_counter_unlock(counter); return counter->block_list[idx]; } static void prestera_counter_stats_work(struct work_struct *work) { struct delayed_work *dl_work = container_of(work, struct delayed_work, work); struct prestera_counter *counter = container_of(dl_work, struct prestera_counter, stats_dw); struct prestera_counter_block *block; u32 resched_time = COUNTER_POLL_TIME; u32 count = COUNTER_BULK_SIZE; bool done = false; int err; u32 i; block = prestera_counter_block_get_by_idx(counter, counter->curr_idx); if (!block) { if (counter->is_fetching) goto abort; goto next; } if (!counter->is_fetching) { err = prestera_hw_counter_trigger(counter->sw, block->id); if (err) goto abort; prestera_counter_block_lock(block); block->is_updating = true; prestera_counter_block_unlock(block); counter->is_fetching = true; counter->total_read = 0; resched_time = COUNTER_RESCHED_TIME; goto resched; } prestera_counter_block_lock(block); err = prestera_hw_counters_get(counter->sw, counter->total_read, &count, &done, &block->stats[counter->total_read]); prestera_counter_block_unlock(block); if (err) goto abort; counter->total_read += count; if (!done || counter->total_read < block->num_counters) { resched_time = COUNTER_RESCHED_TIME; goto resched; } for (i = 0; i < block->num_counters; i++) { if (block->counter_flag[i] == COUNTER_FLAG_INVALID) { prestera_counter_block_lock(block); block->counter_flag[i] = COUNTER_FLAG_READY; memset(&block->stats[i], 0, sizeof(*block->stats)); prestera_counter_block_unlock(block); } } prestera_counter_block_lock(block); block->is_updating = false; prestera_counter_block_unlock(block); goto next; abort: prestera_hw_counter_abort(counter->sw); next: counter->is_fetching = false; counter->curr_idx = prestera_counter_block_idx_next(counter, counter->curr_idx); resched: if (block) prestera_counter_block_put(counter, block); schedule_delayed_work(&counter->stats_dw, resched_time); } /* Can be executed without rtnl_lock(). * So pay attention when something changing. 
*/ int prestera_counter_stats_get(struct prestera_counter *counter, struct prestera_counter_block *block, u32 counter_id, u64 *packets, u64 *bytes) { if (!block || !prestera_counter_is_ready(block, counter_id)) { *packets = 0; *bytes = 0; return 0; } prestera_counter_block_lock(block); *packets = block->stats[counter_id - block->offset].packets; *bytes = block->stats[counter_id - block->offset].bytes; prestera_counter_stats_clear(block, counter_id); prestera_counter_block_unlock(block); return 0; } int prestera_counter_init(struct prestera_switch *sw) { struct prestera_counter *counter; counter = kzalloc(sizeof(*counter), GFP_KERNEL); if (!counter) return -ENOMEM; counter->block_list = kzalloc(sizeof(*counter->block_list), GFP_KERNEL); if (!counter->block_list) { kfree(counter); return -ENOMEM; } mutex_init(&counter->mtx); counter->block_list_len = 1; counter->sw = sw; sw->counter = counter; INIT_DELAYED_WORK(&counter->stats_dw, prestera_counter_stats_work); schedule_delayed_work(&counter->stats_dw, COUNTER_POLL_TIME); return 0; } void prestera_counter_fini(struct prestera_switch *sw) { struct prestera_counter *counter = sw->counter; u32 i; cancel_delayed_work_sync(&counter->stats_dw); for (i = 0; i < counter->block_list_len; i++) WARN_ON(counter->block_list[i]); mutex_destroy(&counter->mtx); kfree(counter->block_list); kfree(counter); }
linux-master
drivers/net/ethernet/marvell/prestera/prestera_counter.c
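prestera_counter.c above hands out counters from reference-counted blocks obtained from firmware; a block is marked full once its id space is exhausted and callers retry against a fresh block. The following is a simplified userspace sketch of that block/slot scheme, assuming invented names (counter_block, block_get, slot_get, block_put) and a fixed SLOTS_PER_BLOCK in place of the hardware-reported block size.

/* Sketch only: get a non-full block (or allocate one), take a slot in it,
 * and drop the block reference when the slot cannot be granted.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define SLOTS_PER_BLOCK 4

struct counter_block {
    unsigned int refcnt;
    bool used[SLOTS_PER_BLOCK];
    bool full;
};

/* Return a referenced, non-full block; allocate a new one if needed.
 * The real driver keeps every block in a block_list; for brevity this
 * sketch only remembers the current block and leaks replaced ones.
 */
static struct counter_block *block_get(struct counter_block **cur)
{
    if (*cur && !(*cur)->full) {
        (*cur)->refcnt++;
        return *cur;
    }
    *cur = calloc(1, sizeof(**cur));
    if (*cur)
        (*cur)->refcnt = 1;
    return *cur;
}

/* Grab a free slot, or mark the block full so callers move on. */
static int slot_get(struct counter_block *b)
{
    for (int i = 0; i < SLOTS_PER_BLOCK; i++) {
        if (!b->used[i]) {
            b->used[i] = true;
            return i;
        }
    }
    b->full = true;
    return -1;
}

static void block_put(struct counter_block **cur, struct counter_block *b)
{
    if (--b->refcnt == 0) {
        if (*cur == b)
            *cur = NULL;
        free(b);
    }
}

int main(void)
{
    struct counter_block *cur = NULL;

    for (int i = 0; i < 6; i++) {
        struct counter_block *b = block_get(&cur);
        int id = slot_get(b);

        if (id < 0) {                   /* block exhausted: retry on a fresh one */
            block_put(&cur, b);
            b = block_get(&cur);
            id = slot_get(b);
        }
        printf("counter %d in block %p\n", id, (void *)b);
    }
    return 0;
}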
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /* Copyright (c) 2020 Marvell International Ltd. All rights reserved */ #include <linux/kernel.h> #include <linux/list.h> #include "prestera.h" #include "prestera_hw.h" #include "prestera_acl.h" #include "prestera_flow.h" #include "prestera_span.h" struct prestera_span_entry { struct list_head list; struct prestera_port *port; refcount_t ref_count; u8 id; }; struct prestera_span { struct prestera_switch *sw; struct list_head entries; }; static struct prestera_span_entry * prestera_span_entry_create(struct prestera_port *port, u8 span_id) { struct prestera_span_entry *entry; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return ERR_PTR(-ENOMEM); refcount_set(&entry->ref_count, 1); entry->port = port; entry->id = span_id; list_add_tail(&entry->list, &port->sw->span->entries); return entry; } static void prestera_span_entry_del(struct prestera_span_entry *entry) { list_del(&entry->list); kfree(entry); } static struct prestera_span_entry * prestera_span_entry_find_by_id(struct prestera_span *span, u8 span_id) { struct prestera_span_entry *entry; list_for_each_entry(entry, &span->entries, list) { if (entry->id == span_id) return entry; } return NULL; } static struct prestera_span_entry * prestera_span_entry_find_by_port(struct prestera_span *span, struct prestera_port *port) { struct prestera_span_entry *entry; list_for_each_entry(entry, &span->entries, list) { if (entry->port == port) return entry; } return NULL; } static int prestera_span_get(struct prestera_port *port, u8 *span_id) { u8 new_span_id; struct prestera_switch *sw = port->sw; struct prestera_span_entry *entry; int err; entry = prestera_span_entry_find_by_port(sw->span, port); if (entry) { refcount_inc(&entry->ref_count); *span_id = entry->id; return 0; } err = prestera_hw_span_get(port, &new_span_id); if (err) return err; entry = prestera_span_entry_create(port, new_span_id); if (IS_ERR(entry)) { prestera_hw_span_release(sw, new_span_id); return PTR_ERR(entry); } *span_id = new_span_id; return 0; } static int prestera_span_put(struct prestera_switch *sw, u8 span_id) { struct prestera_span_entry *entry; int err; entry = prestera_span_entry_find_by_id(sw->span, span_id); if (!entry) return -ENOENT; if (!refcount_dec_and_test(&entry->ref_count)) return 0; err = prestera_hw_span_release(sw, span_id); if (err) return err; prestera_span_entry_del(entry); return 0; } int prestera_span_rule_add(struct prestera_flow_block_binding *binding, struct prestera_port *to_port, bool ingress) { struct prestera_switch *sw = binding->port->sw; u8 span_id; int err; if (binding->span_id != PRESTERA_SPAN_INVALID_ID) /* port already in mirroring */ return -EEXIST; err = prestera_span_get(to_port, &span_id); if (err) return err; err = prestera_hw_span_bind(binding->port, span_id, ingress); if (err) { prestera_span_put(sw, span_id); return err; } binding->span_id = span_id; return 0; } int prestera_span_rule_del(struct prestera_flow_block_binding *binding, bool ingress) { int err; if (binding->span_id == PRESTERA_SPAN_INVALID_ID) return -ENOENT; err = prestera_hw_span_unbind(binding->port, ingress); if (err) return err; err = prestera_span_put(binding->port->sw, binding->span_id); if (err) return err; binding->span_id = PRESTERA_SPAN_INVALID_ID; return 0; } int prestera_span_init(struct prestera_switch *sw) { struct prestera_span *span; span = kzalloc(sizeof(*span), GFP_KERNEL); if (!span) return -ENOMEM; INIT_LIST_HEAD(&span->entries); sw->span = span; span->sw = sw; return 0; } void 
prestera_span_fini(struct prestera_switch *sw)
{
	struct prestera_span *span = sw->span;

	WARN_ON(!list_empty(&span->entries));
	kfree(span);
}

linux-master
drivers/net/ethernet/marvell/prestera/prestera_span.c
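The SPAN code above creates one mirror session per source port on first use, bumps a reference count on repeated use, and releases the hardware session only when the last user drops it. A minimal sketch of that get/put discipline follows; the names (span_entry, span_get, span_put) and the counter standing in for the hardware-allocated id are illustrative assumptions.

/* Sketch only: get-or-create a refcounted per-port mirror session and
 * release it when the last reference is dropped.
 */
#include <stdio.h>
#include <stdlib.h>

struct span_entry {
    struct span_entry *next;
    int port;                   /* source port being mirrored */
    int span_id;                /* id handed back by "hardware" */
    unsigned int refcnt;
};

static struct span_entry *entries;
static int next_hw_id;

static int span_get(int port)
{
    struct span_entry *e;

    for (e = entries; e; e = e->next) {
        if (e->port == port) {
            e->refcnt++;
            return e->span_id;
        }
    }
    e = calloc(1, sizeof(*e));
    if (!e)
        return -1;
    e->port = port;
    e->span_id = next_hw_id++;  /* stands in for the hw "span get" call */
    e->refcnt = 1;
    e->next = entries;
    entries = e;
    return e->span_id;
}

static void span_put(int span_id)
{
    struct span_entry **p = &entries;

    while (*p) {
        struct span_entry *e = *p;

        if (e->span_id == span_id) {
            if (--e->refcnt == 0) {     /* last user: release the session */
                *p = e->next;
                free(e);
            }
            return;
        }
        p = &e->next;
    }
}

int main(void)
{
    int a = span_get(1);
    int b = span_get(1);        /* same port -> same session, refcount 2 */

    printf("%d %d\n", a, b);
    span_put(a);
    span_put(b);                /* second put frees the entry */
    return 0;
}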
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */ #include <net/devlink.h> #include "prestera_devlink.h" #include "prestera_hw.h" /* All driver-specific traps must be documented in * Documentation/networking/devlink/prestera.rst */ enum { DEVLINK_PRESTERA_TRAP_ID_BASE = DEVLINK_TRAP_GENERIC_ID_MAX, DEVLINK_PRESTERA_TRAP_ID_ARP_BC, DEVLINK_PRESTERA_TRAP_ID_IS_IS, DEVLINK_PRESTERA_TRAP_ID_OSPF, DEVLINK_PRESTERA_TRAP_ID_IP_BC_MAC, DEVLINK_PRESTERA_TRAP_ID_ROUTER_MC, DEVLINK_PRESTERA_TRAP_ID_VRRP, DEVLINK_PRESTERA_TRAP_ID_DHCP, DEVLINK_PRESTERA_TRAP_ID_MAC_TO_ME, DEVLINK_PRESTERA_TRAP_ID_IPV4_OPTIONS, DEVLINK_PRESTERA_TRAP_ID_IP_DEFAULT_ROUTE, DEVLINK_PRESTERA_TRAP_ID_IP_TO_ME, DEVLINK_PRESTERA_TRAP_ID_IPV4_ICMP_REDIRECT, DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_0, DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_1, DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_2, DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_3, DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_4, DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_5, DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_6, DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_7, DEVLINK_PRESTERA_TRAP_ID_BGP, DEVLINK_PRESTERA_TRAP_ID_SSH, DEVLINK_PRESTERA_TRAP_ID_TELNET, DEVLINK_PRESTERA_TRAP_ID_ICMP, DEVLINK_PRESTERA_TRAP_ID_MET_RED, DEVLINK_PRESTERA_TRAP_ID_IP_SIP_IS_ZERO, DEVLINK_PRESTERA_TRAP_ID_IP_UC_DIP_DA_MISMATCH, DEVLINK_PRESTERA_TRAP_ID_ILLEGAL_IPV4_HDR, DEVLINK_PRESTERA_TRAP_ID_ILLEGAL_IP_ADDR, DEVLINK_PRESTERA_TRAP_ID_INVALID_SA, DEVLINK_PRESTERA_TRAP_ID_LOCAL_PORT, DEVLINK_PRESTERA_TRAP_ID_PORT_NO_VLAN, DEVLINK_PRESTERA_TRAP_ID_RXDMA_DROP, }; #define DEVLINK_PRESTERA_TRAP_NAME_ARP_BC \ "arp_bc" #define DEVLINK_PRESTERA_TRAP_NAME_IS_IS \ "is_is" #define DEVLINK_PRESTERA_TRAP_NAME_OSPF \ "ospf" #define DEVLINK_PRESTERA_TRAP_NAME_IP_BC_MAC \ "ip_bc_mac" #define DEVLINK_PRESTERA_TRAP_NAME_ROUTER_MC \ "router_mc" #define DEVLINK_PRESTERA_TRAP_NAME_VRRP \ "vrrp" #define DEVLINK_PRESTERA_TRAP_NAME_DHCP \ "dhcp" #define DEVLINK_PRESTERA_TRAP_NAME_MAC_TO_ME \ "mac_to_me" #define DEVLINK_PRESTERA_TRAP_NAME_IPV4_OPTIONS \ "ipv4_options" #define DEVLINK_PRESTERA_TRAP_NAME_IP_DEFAULT_ROUTE \ "ip_default_route" #define DEVLINK_PRESTERA_TRAP_NAME_IP_TO_ME \ "ip_to_me" #define DEVLINK_PRESTERA_TRAP_NAME_IPV4_ICMP_REDIRECT \ "ipv4_icmp_redirect" #define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_0 \ "acl_code_0" #define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_1 \ "acl_code_1" #define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_2 \ "acl_code_2" #define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_3 \ "acl_code_3" #define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_4 \ "acl_code_4" #define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_5 \ "acl_code_5" #define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_6 \ "acl_code_6" #define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_7 \ "acl_code_7" #define DEVLINK_PRESTERA_TRAP_NAME_BGP \ "bgp" #define DEVLINK_PRESTERA_TRAP_NAME_SSH \ "ssh" #define DEVLINK_PRESTERA_TRAP_NAME_TELNET \ "telnet" #define DEVLINK_PRESTERA_TRAP_NAME_ICMP \ "icmp" #define DEVLINK_PRESTERA_TRAP_NAME_RXDMA_DROP \ "rxdma_drop" #define DEVLINK_PRESTERA_TRAP_NAME_PORT_NO_VLAN \ "port_no_vlan" #define DEVLINK_PRESTERA_TRAP_NAME_LOCAL_PORT \ "local_port" #define DEVLINK_PRESTERA_TRAP_NAME_INVALID_SA \ "invalid_sa" #define DEVLINK_PRESTERA_TRAP_NAME_ILLEGAL_IP_ADDR \ "illegal_ip_addr" #define DEVLINK_PRESTERA_TRAP_NAME_ILLEGAL_IPV4_HDR \ "illegal_ipv4_hdr" #define DEVLINK_PRESTERA_TRAP_NAME_IP_UC_DIP_DA_MISMATCH \ "ip_uc_dip_da_mismatch" #define DEVLINK_PRESTERA_TRAP_NAME_IP_SIP_IS_ZERO \ "ip_sip_is_zero" #define 
DEVLINK_PRESTERA_TRAP_NAME_MET_RED \ "met_red" struct prestera_trap { struct devlink_trap trap; u8 cpu_code; }; struct prestera_trap_item { enum devlink_trap_action action; void *trap_ctx; }; struct prestera_trap_data { struct prestera_switch *sw; struct prestera_trap_item *trap_items_arr; u32 traps_count; }; #define PRESTERA_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT #define PRESTERA_TRAP_CONTROL(_id, _group_id, _action) \ DEVLINK_TRAP_GENERIC(CONTROL, _action, _id, \ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \ PRESTERA_TRAP_METADATA) #define PRESTERA_TRAP_DRIVER_CONTROL(_id, _group_id) \ DEVLINK_TRAP_DRIVER(CONTROL, TRAP, DEVLINK_PRESTERA_TRAP_ID_##_id, \ DEVLINK_PRESTERA_TRAP_NAME_##_id, \ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \ PRESTERA_TRAP_METADATA) #define PRESTERA_TRAP_EXCEPTION(_id, _group_id) \ DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \ PRESTERA_TRAP_METADATA) #define PRESTERA_TRAP_DRIVER_EXCEPTION(_id, _group_id) \ DEVLINK_TRAP_DRIVER(EXCEPTION, TRAP, DEVLINK_PRESTERA_TRAP_ID_##_id, \ DEVLINK_PRESTERA_TRAP_NAME_##_id, \ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \ PRESTERA_TRAP_METADATA) #define PRESTERA_TRAP_DRIVER_DROP(_id, _group_id) \ DEVLINK_TRAP_DRIVER(DROP, DROP, DEVLINK_PRESTERA_TRAP_ID_##_id, \ DEVLINK_PRESTERA_TRAP_NAME_##_id, \ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \ PRESTERA_TRAP_METADATA) static const struct devlink_trap_group prestera_trap_groups_arr[] = { /* No policer is associated with following groups (policerid == 0)*/ DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0), DEVLINK_TRAP_GROUP_GENERIC(L3_DROPS, 0), DEVLINK_TRAP_GROUP_GENERIC(L3_EXCEPTIONS, 0), DEVLINK_TRAP_GROUP_GENERIC(NEIGH_DISCOVERY, 0), DEVLINK_TRAP_GROUP_GENERIC(ACL_TRAP, 0), DEVLINK_TRAP_GROUP_GENERIC(ACL_DROPS, 0), DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0), DEVLINK_TRAP_GROUP_GENERIC(OSPF, 0), DEVLINK_TRAP_GROUP_GENERIC(STP, 0), DEVLINK_TRAP_GROUP_GENERIC(LACP, 0), DEVLINK_TRAP_GROUP_GENERIC(LLDP, 0), DEVLINK_TRAP_GROUP_GENERIC(VRRP, 0), DEVLINK_TRAP_GROUP_GENERIC(DHCP, 0), DEVLINK_TRAP_GROUP_GENERIC(BGP, 0), DEVLINK_TRAP_GROUP_GENERIC(LOCAL_DELIVERY, 0), DEVLINK_TRAP_GROUP_GENERIC(BUFFER_DROPS, 0), }; /* Initialize trap list, as well as associate CPU code with them. 
*/ static struct prestera_trap prestera_trap_items_arr[] = { { .trap = PRESTERA_TRAP_DRIVER_CONTROL(ARP_BC, NEIGH_DISCOVERY), .cpu_code = 5, }, { .trap = PRESTERA_TRAP_DRIVER_CONTROL(IS_IS, LOCAL_DELIVERY), .cpu_code = 13, }, { .trap = PRESTERA_TRAP_DRIVER_CONTROL(OSPF, OSPF), .cpu_code = 16, }, { .trap = PRESTERA_TRAP_DRIVER_CONTROL(IP_BC_MAC, LOCAL_DELIVERY), .cpu_code = 19, }, { .trap = PRESTERA_TRAP_CONTROL(STP, STP, TRAP), .cpu_code = 26, }, { .trap = PRESTERA_TRAP_CONTROL(LACP, LACP, TRAP), .cpu_code = 27, }, { .trap = PRESTERA_TRAP_CONTROL(LLDP, LLDP, TRAP), .cpu_code = 28, }, { .trap = PRESTERA_TRAP_DRIVER_CONTROL(ROUTER_MC, LOCAL_DELIVERY), .cpu_code = 29, }, { .trap = PRESTERA_TRAP_DRIVER_CONTROL(VRRP, VRRP), .cpu_code = 30, }, { .trap = PRESTERA_TRAP_DRIVER_CONTROL(DHCP, DHCP), .cpu_code = 33, }, { .trap = PRESTERA_TRAP_EXCEPTION(MTU_ERROR, L3_EXCEPTIONS), .cpu_code = 63, }, { .trap = PRESTERA_TRAP_DRIVER_CONTROL(MAC_TO_ME, LOCAL_DELIVERY), .cpu_code = 65, }, { .trap = PRESTERA_TRAP_EXCEPTION(TTL_ERROR, L3_EXCEPTIONS), .cpu_code = 133, }, { .trap = PRESTERA_TRAP_DRIVER_EXCEPTION(IPV4_OPTIONS, L3_EXCEPTIONS), .cpu_code = 141, }, { .trap = PRESTERA_TRAP_DRIVER_CONTROL(IP_DEFAULT_ROUTE, LOCAL_DELIVERY), .cpu_code = 160, }, { .trap = PRESTERA_TRAP_CONTROL(LOCAL_ROUTE, LOCAL_DELIVERY, TRAP), .cpu_code = 161, }, { .trap = PRESTERA_TRAP_DRIVER_EXCEPTION(IPV4_ICMP_REDIRECT, L3_EXCEPTIONS), .cpu_code = 180, }, { .trap = PRESTERA_TRAP_CONTROL(ARP_RESPONSE, NEIGH_DISCOVERY, TRAP), .cpu_code = 188, }, { .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_0, ACL_TRAP), .cpu_code = 192, }, { .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_1, ACL_TRAP), .cpu_code = 193, }, { .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_2, ACL_TRAP), .cpu_code = 194, }, { .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_3, ACL_TRAP), .cpu_code = 195, }, { .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_4, ACL_TRAP), .cpu_code = 196, }, { .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_5, ACL_TRAP), .cpu_code = 197, }, { .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_6, ACL_TRAP), .cpu_code = 198, }, { .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_7, ACL_TRAP), .cpu_code = 199, }, { .trap = PRESTERA_TRAP_DRIVER_CONTROL(BGP, BGP), .cpu_code = 206, }, { .trap = PRESTERA_TRAP_DRIVER_CONTROL(SSH, LOCAL_DELIVERY), .cpu_code = 207, }, { .trap = PRESTERA_TRAP_DRIVER_CONTROL(TELNET, LOCAL_DELIVERY), .cpu_code = 208, }, { .trap = PRESTERA_TRAP_DRIVER_CONTROL(ICMP, LOCAL_DELIVERY), .cpu_code = 209, }, { .trap = PRESTERA_TRAP_DRIVER_DROP(RXDMA_DROP, BUFFER_DROPS), .cpu_code = 37, }, { .trap = PRESTERA_TRAP_DRIVER_DROP(PORT_NO_VLAN, L2_DROPS), .cpu_code = 39, }, { .trap = PRESTERA_TRAP_DRIVER_DROP(LOCAL_PORT, L2_DROPS), .cpu_code = 56, }, { .trap = PRESTERA_TRAP_DRIVER_DROP(INVALID_SA, L2_DROPS), .cpu_code = 60, }, { .trap = PRESTERA_TRAP_DRIVER_DROP(ILLEGAL_IP_ADDR, L3_DROPS), .cpu_code = 136, }, { .trap = PRESTERA_TRAP_DRIVER_DROP(ILLEGAL_IPV4_HDR, L3_DROPS), .cpu_code = 137, }, { .trap = PRESTERA_TRAP_DRIVER_DROP(IP_UC_DIP_DA_MISMATCH, L3_DROPS), .cpu_code = 138, }, { .trap = PRESTERA_TRAP_DRIVER_DROP(IP_SIP_IS_ZERO, L3_DROPS), .cpu_code = 145, }, { .trap = PRESTERA_TRAP_DRIVER_DROP(MET_RED, BUFFER_DROPS), .cpu_code = 185, }, }; static int prestera_drop_counter_get(struct devlink *devlink, const struct devlink_trap *trap, u64 *p_drops); static int prestera_dl_info_get(struct devlink *dl, struct devlink_info_req *req, struct netlink_ext_ack *extack) { struct prestera_switch *sw = devlink_priv(dl); char buf[16]; snprintf(buf, 
sizeof(buf), "%d.%d.%d", sw->dev->fw_rev.maj, sw->dev->fw_rev.min, sw->dev->fw_rev.sub); return devlink_info_version_running_put(req, DEVLINK_INFO_VERSION_GENERIC_FW, buf); } static int prestera_trap_init(struct devlink *devlink, const struct devlink_trap *trap, void *trap_ctx); static int prestera_trap_action_set(struct devlink *devlink, const struct devlink_trap *trap, enum devlink_trap_action action, struct netlink_ext_ack *extack); static const struct devlink_ops prestera_dl_ops = { .info_get = prestera_dl_info_get, .trap_init = prestera_trap_init, .trap_action_set = prestera_trap_action_set, .trap_drop_counter_get = prestera_drop_counter_get, }; struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev) { struct devlink *dl; dl = devlink_alloc(&prestera_dl_ops, sizeof(struct prestera_switch), dev->dev); return devlink_priv(dl); } void prestera_devlink_free(struct prestera_switch *sw) { struct devlink *dl = priv_to_devlink(sw); devlink_free(dl); } void prestera_devlink_register(struct prestera_switch *sw) { struct devlink *dl = priv_to_devlink(sw); devlink_register(dl); } void prestera_devlink_unregister(struct prestera_switch *sw) { struct devlink *dl = priv_to_devlink(sw); devlink_unregister(dl); } int prestera_devlink_port_register(struct prestera_port *port) { struct prestera_switch *sw = port->sw; struct devlink *dl = priv_to_devlink(sw); struct devlink_port_attrs attrs = {}; int err; attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; attrs.phys.port_number = port->fp_id; attrs.switch_id.id_len = sizeof(sw->id); memcpy(attrs.switch_id.id, &sw->id, attrs.switch_id.id_len); devlink_port_attrs_set(&port->dl_port, &attrs); err = devlink_port_register(dl, &port->dl_port, port->fp_id); if (err) { dev_err(prestera_dev(sw), "devlink_port_register failed: %d\n", err); return err; } return 0; } void prestera_devlink_port_unregister(struct prestera_port *port) { devlink_port_unregister(&port->dl_port); } int prestera_devlink_traps_register(struct prestera_switch *sw) { const u32 groups_count = ARRAY_SIZE(prestera_trap_groups_arr); const u32 traps_count = ARRAY_SIZE(prestera_trap_items_arr); struct devlink *devlink = priv_to_devlink(sw); struct prestera_trap_data *trap_data; struct prestera_trap *prestera_trap; int err, i; trap_data = kzalloc(sizeof(*trap_data), GFP_KERNEL); if (!trap_data) return -ENOMEM; trap_data->trap_items_arr = kcalloc(traps_count, sizeof(struct prestera_trap_item), GFP_KERNEL); if (!trap_data->trap_items_arr) { err = -ENOMEM; goto err_trap_items_alloc; } trap_data->sw = sw; trap_data->traps_count = traps_count; sw->trap_data = trap_data; err = devlink_trap_groups_register(devlink, prestera_trap_groups_arr, groups_count); if (err) goto err_groups_register; for (i = 0; i < traps_count; i++) { prestera_trap = &prestera_trap_items_arr[i]; err = devlink_traps_register(devlink, &prestera_trap->trap, 1, sw); if (err) goto err_trap_register; } return 0; err_trap_register: for (i--; i >= 0; i--) { prestera_trap = &prestera_trap_items_arr[i]; devlink_traps_unregister(devlink, &prestera_trap->trap, 1); } devlink_trap_groups_unregister(devlink, prestera_trap_groups_arr, groups_count); err_groups_register: kfree(trap_data->trap_items_arr); err_trap_items_alloc: kfree(trap_data); return err; } static struct prestera_trap_item * prestera_get_trap_item_by_cpu_code(struct prestera_switch *sw, u8 cpu_code) { struct prestera_trap_data *trap_data = sw->trap_data; struct prestera_trap *prestera_trap; int i; for (i = 0; i < trap_data->traps_count; i++) { prestera_trap = 
&prestera_trap_items_arr[i]; if (cpu_code == prestera_trap->cpu_code) return &trap_data->trap_items_arr[i]; } return NULL; } void prestera_devlink_trap_report(struct prestera_port *port, struct sk_buff *skb, u8 cpu_code) { struct prestera_trap_item *trap_item; struct devlink *devlink; devlink = port->dl_port.devlink; trap_item = prestera_get_trap_item_by_cpu_code(port->sw, cpu_code); if (unlikely(!trap_item)) return; devlink_trap_report(devlink, skb, trap_item->trap_ctx, &port->dl_port, NULL); } static struct prestera_trap_item * prestera_devlink_trap_item_lookup(struct prestera_switch *sw, u16 trap_id) { struct prestera_trap_data *trap_data = sw->trap_data; int i; for (i = 0; i < ARRAY_SIZE(prestera_trap_items_arr); i++) { if (prestera_trap_items_arr[i].trap.id == trap_id) return &trap_data->trap_items_arr[i]; } return NULL; } static int prestera_trap_init(struct devlink *devlink, const struct devlink_trap *trap, void *trap_ctx) { struct prestera_switch *sw = devlink_priv(devlink); struct prestera_trap_item *trap_item; trap_item = prestera_devlink_trap_item_lookup(sw, trap->id); if (WARN_ON(!trap_item)) return -EINVAL; trap_item->trap_ctx = trap_ctx; trap_item->action = trap->init_action; return 0; } static int prestera_trap_action_set(struct devlink *devlink, const struct devlink_trap *trap, enum devlink_trap_action action, struct netlink_ext_ack *extack) { /* Currently, driver does not support trap action altering */ return -EOPNOTSUPP; } static int prestera_drop_counter_get(struct devlink *devlink, const struct devlink_trap *trap, u64 *p_drops) { struct prestera_switch *sw = devlink_priv(devlink); enum prestera_hw_cpu_code_cnt_t cpu_code_type = PRESTERA_HW_CPU_CODE_CNT_TYPE_DROP; struct prestera_trap *prestera_trap = container_of(trap, struct prestera_trap, trap); return prestera_hw_cpu_code_counters_get(sw, prestera_trap->cpu_code, cpu_code_type, p_drops); } void prestera_devlink_traps_unregister(struct prestera_switch *sw) { struct prestera_trap_data *trap_data = sw->trap_data; struct devlink *dl = priv_to_devlink(sw); const struct devlink_trap *trap; int i; for (i = 0; i < ARRAY_SIZE(prestera_trap_items_arr); ++i) { trap = &prestera_trap_items_arr[i].trap; devlink_traps_unregister(dl, trap, 1); } devlink_trap_groups_unregister(dl, prestera_trap_groups_arr, ARRAY_SIZE(prestera_trap_groups_arr)); kfree(trap_data->trap_items_arr); kfree(trap_data); }
linux-master
drivers/net/ethernet/marvell/prestera/prestera_devlink.c
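prestera_devlink.c above associates each driver-specific trap with the hardware CPU code it arrives on, and prestera_devlink_trap_report() resolves an incoming packet's CPU code back to the trap item before calling devlink_trap_report(). The short sketch below shows only that lookup step; the cpu_code values are taken from the table above, while struct trap and trap_by_cpu_code are illustrative stand-ins rather than driver API.

/* Sketch only: map a hardware CPU code to the trap it belongs to. */
#include <stddef.h>
#include <stdio.h>

struct trap {
    unsigned char cpu_code;
    const char *name;
};

static const struct trap traps[] = {
    { 5,  "arp_bc" },
    { 16, "ospf"   },
    { 33, "dhcp"   },
};

static const struct trap *trap_by_cpu_code(unsigned char cpu_code)
{
    for (size_t i = 0; i < sizeof(traps) / sizeof(traps[0]); i++)
        if (traps[i].cpu_code == cpu_code)
            return &traps[i];
    return NULL;        /* unknown code: the packet is not reported */
}

int main(void)
{
    const struct trap *t = trap_by_cpu_code(16);

    printf("%s\n", t ? t->name : "unknown");
    return 0;
}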
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */ #include <linux/if_bridge.h> #include <linux/if_vlan.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/notifier.h> #include <net/netevent.h> #include <net/switchdev.h> #include "prestera.h" #include "prestera_hw.h" #include "prestera_switchdev.h" #define PRESTERA_VID_ALL (0xffff) #define PRESTERA_DEFAULT_AGEING_TIME_MS 300000 #define PRESTERA_MAX_AGEING_TIME_MS 1000000000 #define PRESTERA_MIN_AGEING_TIME_MS 32000 struct prestera_fdb_event_work { struct work_struct work; struct switchdev_notifier_fdb_info fdb_info; struct net_device *dev; unsigned long event; }; struct prestera_switchdev { struct prestera_switch *sw; struct list_head bridge_list; bool bridge_8021q_exists; struct notifier_block swdev_nb_blk; struct notifier_block swdev_nb; }; struct prestera_bridge { struct list_head head; struct net_device *dev; struct prestera_switchdev *swdev; struct list_head port_list; struct list_head br_mdb_entry_list; bool mrouter_exist; bool vlan_enabled; bool multicast_enabled; u16 bridge_id; }; struct prestera_bridge_port { struct list_head head; struct net_device *dev; struct prestera_bridge *bridge; struct list_head vlan_list; struct list_head br_mdb_port_list; refcount_t ref_count; unsigned long flags; bool mrouter; u8 stp_state; }; struct prestera_bridge_vlan { struct list_head head; struct list_head port_vlan_list; u16 vid; }; struct prestera_port_vlan { struct list_head br_vlan_head; struct list_head port_head; struct prestera_port *port; struct prestera_bridge_port *br_port; u16 vid; }; struct prestera_br_mdb_port { struct prestera_bridge_port *br_port; struct list_head br_mdb_port_node; }; /* Software representation of MDB table. 
*/ struct prestera_br_mdb_entry { struct prestera_bridge *bridge; struct prestera_mdb_entry *mdb; struct list_head br_mdb_port_list; struct list_head br_mdb_entry_node; bool enabled; }; static struct workqueue_struct *swdev_wq; static void prestera_bridge_port_put(struct prestera_bridge_port *br_port); static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid, u8 state); static struct prestera_bridge * prestera_bridge_find(const struct prestera_switch *sw, const struct net_device *br_dev) { struct prestera_bridge *bridge; list_for_each_entry(bridge, &sw->swdev->bridge_list, head) if (bridge->dev == br_dev) return bridge; return NULL; } static struct prestera_bridge_port * __prestera_bridge_port_find(const struct prestera_bridge *bridge, const struct net_device *brport_dev) { struct prestera_bridge_port *br_port; list_for_each_entry(br_port, &bridge->port_list, head) if (br_port->dev == brport_dev) return br_port; return NULL; } static struct prestera_bridge_port * prestera_bridge_port_find(struct prestera_switch *sw, struct net_device *brport_dev) { struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev); struct prestera_bridge *bridge; if (!br_dev) return NULL; bridge = prestera_bridge_find(sw, br_dev); if (!bridge) return NULL; return __prestera_bridge_port_find(bridge, brport_dev); } static void prestera_br_port_flags_reset(struct prestera_bridge_port *br_port, struct prestera_port *port) { prestera_port_uc_flood_set(port, false); prestera_port_mc_flood_set(port, false); prestera_port_learning_set(port, false); prestera_port_br_locked_set(port, false); } static int prestera_br_port_flags_set(struct prestera_bridge_port *br_port, struct prestera_port *port) { int err; err = prestera_port_uc_flood_set(port, br_port->flags & BR_FLOOD); if (err) goto err_out; err = prestera_port_mc_flood_set(port, br_port->flags & BR_MCAST_FLOOD); if (err) goto err_out; err = prestera_port_learning_set(port, br_port->flags & BR_LEARNING); if (err) goto err_out; err = prestera_port_br_locked_set(port, br_port->flags & BR_PORT_LOCKED); if (err) goto err_out; return 0; err_out: prestera_br_port_flags_reset(br_port, port); return err; } static struct prestera_bridge_vlan * prestera_bridge_vlan_create(struct prestera_bridge_port *br_port, u16 vid) { struct prestera_bridge_vlan *br_vlan; br_vlan = kzalloc(sizeof(*br_vlan), GFP_KERNEL); if (!br_vlan) return NULL; INIT_LIST_HEAD(&br_vlan->port_vlan_list); br_vlan->vid = vid; list_add(&br_vlan->head, &br_port->vlan_list); return br_vlan; } static void prestera_bridge_vlan_destroy(struct prestera_bridge_vlan *br_vlan) { list_del(&br_vlan->head); WARN_ON(!list_empty(&br_vlan->port_vlan_list)); kfree(br_vlan); } static struct prestera_bridge_vlan * prestera_bridge_vlan_by_vid(struct prestera_bridge_port *br_port, u16 vid) { struct prestera_bridge_vlan *br_vlan; list_for_each_entry(br_vlan, &br_port->vlan_list, head) { if (br_vlan->vid == vid) return br_vlan; } return NULL; } static int prestera_bridge_vlan_port_count(struct prestera_bridge *bridge, u16 vid) { struct prestera_bridge_port *br_port; struct prestera_bridge_vlan *br_vlan; int count = 0; list_for_each_entry(br_port, &bridge->port_list, head) { list_for_each_entry(br_vlan, &br_port->vlan_list, head) { if (br_vlan->vid == vid) { count += 1; break; } } } return count; } static void prestera_bridge_vlan_put(struct prestera_bridge_vlan *br_vlan) { if (list_empty(&br_vlan->port_vlan_list)) prestera_bridge_vlan_destroy(br_vlan); } static struct prestera_port_vlan * 
prestera_port_vlan_by_vid(struct prestera_port *port, u16 vid) { struct prestera_port_vlan *port_vlan; list_for_each_entry(port_vlan, &port->vlans_list, port_head) { if (port_vlan->vid == vid) return port_vlan; } return NULL; } static struct prestera_port_vlan * prestera_port_vlan_create(struct prestera_port *port, u16 vid, bool untagged) { struct prestera_port_vlan *port_vlan; int err; port_vlan = prestera_port_vlan_by_vid(port, vid); if (port_vlan) return ERR_PTR(-EEXIST); err = prestera_hw_vlan_port_set(port, vid, true, untagged); if (err) return ERR_PTR(err); port_vlan = kzalloc(sizeof(*port_vlan), GFP_KERNEL); if (!port_vlan) { err = -ENOMEM; goto err_port_vlan_alloc; } port_vlan->port = port; port_vlan->vid = vid; list_add(&port_vlan->port_head, &port->vlans_list); return port_vlan; err_port_vlan_alloc: prestera_hw_vlan_port_set(port, vid, false, false); return ERR_PTR(err); } static int prestera_fdb_add(struct prestera_port *port, const unsigned char *mac, u16 vid, bool dynamic) { if (prestera_port_is_lag_member(port)) return prestera_hw_lag_fdb_add(port->sw, prestera_port_lag_id(port), mac, vid, dynamic); return prestera_hw_fdb_add(port, mac, vid, dynamic); } static int prestera_fdb_del(struct prestera_port *port, const unsigned char *mac, u16 vid) { if (prestera_port_is_lag_member(port)) return prestera_hw_lag_fdb_del(port->sw, prestera_port_lag_id(port), mac, vid); else return prestera_hw_fdb_del(port, mac, vid); } static int prestera_fdb_flush_port_vlan(struct prestera_port *port, u16 vid, u32 mode) { if (prestera_port_is_lag_member(port)) return prestera_hw_fdb_flush_lag_vlan(port->sw, prestera_port_lag_id(port), vid, mode); else return prestera_hw_fdb_flush_port_vlan(port, vid, mode); } static int prestera_fdb_flush_port(struct prestera_port *port, u32 mode) { if (prestera_port_is_lag_member(port)) return prestera_hw_fdb_flush_lag(port->sw, prestera_port_lag_id(port), mode); else return prestera_hw_fdb_flush_port(port, mode); } static void prestera_mdb_port_del(struct prestera_mdb_entry *mdb, struct net_device *orig_dev) { struct prestera_flood_domain *fl_domain = mdb->flood_domain; struct prestera_flood_domain_port *flood_domain_port; flood_domain_port = prestera_flood_domain_port_find(fl_domain, orig_dev, mdb->vid); if (flood_domain_port) prestera_flood_domain_port_destroy(flood_domain_port); } static void prestera_br_mdb_entry_put(struct prestera_br_mdb_entry *br_mdb) { struct prestera_bridge_port *br_port; if (list_empty(&br_mdb->br_mdb_port_list)) { list_for_each_entry(br_port, &br_mdb->bridge->port_list, head) prestera_mdb_port_del(br_mdb->mdb, br_port->dev); prestera_mdb_entry_destroy(br_mdb->mdb); list_del(&br_mdb->br_mdb_entry_node); kfree(br_mdb); } } static void prestera_br_mdb_port_del(struct prestera_br_mdb_entry *br_mdb, struct prestera_bridge_port *br_port) { struct prestera_br_mdb_port *br_mdb_port, *tmp; list_for_each_entry_safe(br_mdb_port, tmp, &br_mdb->br_mdb_port_list, br_mdb_port_node) { if (br_mdb_port->br_port == br_port) { list_del(&br_mdb_port->br_mdb_port_node); kfree(br_mdb_port); } } } static void prestera_mdb_flush_bridge_port(struct prestera_bridge_port *br_port) { struct prestera_br_mdb_port *br_mdb_port, *tmp_port; struct prestera_br_mdb_entry *br_mdb, *br_mdb_tmp; struct prestera_bridge *br_dev = br_port->bridge; list_for_each_entry_safe(br_mdb, br_mdb_tmp, &br_dev->br_mdb_entry_list, br_mdb_entry_node) { list_for_each_entry_safe(br_mdb_port, tmp_port, &br_mdb->br_mdb_port_list, br_mdb_port_node) { prestera_mdb_port_del(br_mdb->mdb, 
br_mdb_port->br_port->dev); prestera_br_mdb_port_del(br_mdb, br_mdb_port->br_port); } prestera_br_mdb_entry_put(br_mdb); } } static void prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan) { u32 fdb_flush_mode = PRESTERA_FDB_FLUSH_MODE_DYNAMIC; struct prestera_port *port = port_vlan->port; struct prestera_bridge_vlan *br_vlan; struct prestera_bridge_port *br_port; bool last_port, last_vlan; u16 vid = port_vlan->vid; int port_count; br_port = port_vlan->br_port; port_count = prestera_bridge_vlan_port_count(br_port->bridge, vid); br_vlan = prestera_bridge_vlan_by_vid(br_port, vid); last_vlan = list_is_singular(&br_port->vlan_list); last_port = port_count == 1; if (last_vlan) prestera_fdb_flush_port(port, fdb_flush_mode); else if (last_port) prestera_hw_fdb_flush_vlan(port->sw, vid, fdb_flush_mode); else prestera_fdb_flush_port_vlan(port, vid, fdb_flush_mode); prestera_mdb_flush_bridge_port(br_port); list_del(&port_vlan->br_vlan_head); prestera_bridge_vlan_put(br_vlan); prestera_bridge_port_put(br_port); port_vlan->br_port = NULL; } static void prestera_port_vlan_destroy(struct prestera_port_vlan *port_vlan) { struct prestera_port *port = port_vlan->port; u16 vid = port_vlan->vid; if (port_vlan->br_port) prestera_port_vlan_bridge_leave(port_vlan); prestera_hw_vlan_port_set(port, vid, false, false); list_del(&port_vlan->port_head); kfree(port_vlan); } static struct prestera_bridge * prestera_bridge_create(struct prestera_switchdev *swdev, struct net_device *dev) { bool vlan_enabled = br_vlan_enabled(dev); struct prestera_bridge *bridge; u16 bridge_id; int err; if (vlan_enabled && swdev->bridge_8021q_exists) { netdev_err(dev, "Only one VLAN-aware bridge is supported\n"); return ERR_PTR(-EINVAL); } bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); if (!bridge) return ERR_PTR(-ENOMEM); if (vlan_enabled) { swdev->bridge_8021q_exists = true; } else { err = prestera_hw_bridge_create(swdev->sw, &bridge_id); if (err) { kfree(bridge); return ERR_PTR(err); } bridge->bridge_id = bridge_id; } bridge->vlan_enabled = vlan_enabled; bridge->swdev = swdev; bridge->dev = dev; bridge->multicast_enabled = br_multicast_enabled(dev); INIT_LIST_HEAD(&bridge->port_list); INIT_LIST_HEAD(&bridge->br_mdb_entry_list); list_add(&bridge->head, &swdev->bridge_list); return bridge; } static void prestera_bridge_destroy(struct prestera_bridge *bridge) { struct prestera_switchdev *swdev = bridge->swdev; list_del(&bridge->head); if (bridge->vlan_enabled) swdev->bridge_8021q_exists = false; else prestera_hw_bridge_delete(swdev->sw, bridge->bridge_id); WARN_ON(!list_empty(&bridge->br_mdb_entry_list)); WARN_ON(!list_empty(&bridge->port_list)); kfree(bridge); } static void prestera_bridge_put(struct prestera_bridge *bridge) { if (list_empty(&bridge->port_list)) prestera_bridge_destroy(bridge); } static struct prestera_bridge *prestera_bridge_by_dev(struct prestera_switchdev *swdev, const struct net_device *dev) { struct prestera_bridge *bridge; list_for_each_entry(bridge, &swdev->bridge_list, head) if (bridge->dev == dev) return bridge; return NULL; } static struct prestera_bridge_port * __prestera_bridge_port_by_dev(struct prestera_bridge *bridge, struct net_device *dev) { struct prestera_bridge_port *br_port; list_for_each_entry(br_port, &bridge->port_list, head) { if (br_port->dev == dev) return br_port; } return NULL; } static int prestera_match_upper_bridge_dev(struct net_device *dev, struct netdev_nested_priv *priv) { if (netif_is_bridge_master(dev)) priv->data = dev; return 0; } static struct net_device 
*prestera_get_upper_bridge_dev(struct net_device *dev) { struct netdev_nested_priv priv = { }; netdev_walk_all_upper_dev_rcu(dev, prestera_match_upper_bridge_dev, &priv); return priv.data; } static struct prestera_bridge_port * prestera_bridge_port_by_dev(struct prestera_switchdev *swdev, struct net_device *dev) { struct net_device *br_dev = prestera_get_upper_bridge_dev(dev); struct prestera_bridge *bridge; if (!br_dev) return NULL; bridge = prestera_bridge_by_dev(swdev, br_dev); if (!bridge) return NULL; return __prestera_bridge_port_by_dev(bridge, dev); } static struct prestera_bridge_port * prestera_bridge_port_create(struct prestera_bridge *bridge, struct net_device *dev) { struct prestera_bridge_port *br_port; br_port = kzalloc(sizeof(*br_port), GFP_KERNEL); if (!br_port) return NULL; br_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC | BR_MCAST_FLOOD; br_port->stp_state = BR_STATE_DISABLED; refcount_set(&br_port->ref_count, 1); br_port->bridge = bridge; br_port->dev = dev; INIT_LIST_HEAD(&br_port->vlan_list); list_add(&br_port->head, &bridge->port_list); INIT_LIST_HEAD(&br_port->br_mdb_port_list); return br_port; } static void prestera_bridge_port_destroy(struct prestera_bridge_port *br_port) { list_del(&br_port->head); WARN_ON(!list_empty(&br_port->vlan_list)); WARN_ON(!list_empty(&br_port->br_mdb_port_list)); kfree(br_port); } static void prestera_bridge_port_get(struct prestera_bridge_port *br_port) { refcount_inc(&br_port->ref_count); } static void prestera_bridge_port_put(struct prestera_bridge_port *br_port) { struct prestera_bridge *bridge = br_port->bridge; if (refcount_dec_and_test(&br_port->ref_count)) { prestera_bridge_port_destroy(br_port); prestera_bridge_put(bridge); } } static struct prestera_bridge_port * prestera_bridge_port_add(struct prestera_bridge *bridge, struct net_device *dev) { struct prestera_bridge_port *br_port; br_port = __prestera_bridge_port_by_dev(bridge, dev); if (br_port) { prestera_bridge_port_get(br_port); return br_port; } br_port = prestera_bridge_port_create(bridge, dev); if (!br_port) return ERR_PTR(-ENOMEM); return br_port; } static int prestera_bridge_1d_port_join(struct prestera_bridge_port *br_port) { struct prestera_port *port = netdev_priv(br_port->dev); struct prestera_bridge *bridge = br_port->bridge; int err; err = prestera_hw_bridge_port_add(port, bridge->bridge_id); if (err) return err; err = prestera_br_port_flags_set(br_port, port); if (err) goto err_flags2port_set; return 0; err_flags2port_set: prestera_hw_bridge_port_delete(port, bridge->bridge_id); return err; } int prestera_bridge_port_join(struct net_device *br_dev, struct prestera_port *port, struct netlink_ext_ack *extack) { struct prestera_switchdev *swdev = port->sw->swdev; struct prestera_bridge_port *br_port; struct prestera_bridge *bridge; int err; bridge = prestera_bridge_by_dev(swdev, br_dev); if (!bridge) { bridge = prestera_bridge_create(swdev, br_dev); if (IS_ERR(bridge)) return PTR_ERR(bridge); } br_port = prestera_bridge_port_add(bridge, port->dev); if (IS_ERR(br_port)) { prestera_bridge_put(bridge); return PTR_ERR(br_port); } err = switchdev_bridge_port_offload(br_port->dev, port->dev, NULL, NULL, NULL, false, extack); if (err) goto err_switchdev_offload; if (bridge->vlan_enabled) return 0; err = prestera_bridge_1d_port_join(br_port); if (err) goto err_port_join; return 0; err_port_join: switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL); err_switchdev_offload: prestera_bridge_port_put(br_port); return err; } static void 
prestera_bridge_1q_port_leave(struct prestera_bridge_port *br_port) { struct prestera_port *port = netdev_priv(br_port->dev); prestera_hw_fdb_flush_port(port, PRESTERA_FDB_FLUSH_MODE_ALL); prestera_port_pvid_set(port, PRESTERA_DEFAULT_VID); } static void prestera_bridge_1d_port_leave(struct prestera_bridge_port *br_port) { struct prestera_port *port = netdev_priv(br_port->dev); prestera_hw_fdb_flush_port(port, PRESTERA_FDB_FLUSH_MODE_ALL); prestera_hw_bridge_port_delete(port, br_port->bridge->bridge_id); } static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid, u8 state) { u8 hw_state = state; switch (state) { case BR_STATE_DISABLED: hw_state = PRESTERA_STP_DISABLED; break; case BR_STATE_BLOCKING: case BR_STATE_LISTENING: hw_state = PRESTERA_STP_BLOCK_LISTEN; break; case BR_STATE_LEARNING: hw_state = PRESTERA_STP_LEARN; break; case BR_STATE_FORWARDING: hw_state = PRESTERA_STP_FORWARD; break; default: return -EINVAL; } return prestera_hw_vlan_port_stp_set(port, vid, hw_state); } void prestera_bridge_port_leave(struct net_device *br_dev, struct prestera_port *port) { struct prestera_switchdev *swdev = port->sw->swdev; struct prestera_bridge_port *br_port; struct prestera_bridge *bridge; bridge = prestera_bridge_by_dev(swdev, br_dev); if (!bridge) return; br_port = __prestera_bridge_port_by_dev(bridge, port->dev); if (!br_port) return; bridge = br_port->bridge; if (bridge->vlan_enabled) prestera_bridge_1q_port_leave(br_port); else prestera_bridge_1d_port_leave(br_port); switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL); prestera_mdb_flush_bridge_port(br_port); prestera_br_port_flags_reset(br_port, port); prestera_port_vid_stp_set(port, PRESTERA_VID_ALL, BR_STATE_FORWARDING); prestera_bridge_port_put(br_port); } static int prestera_port_attr_br_flags_set(struct prestera_port *port, struct net_device *dev, struct switchdev_brport_flags flags) { struct prestera_bridge_port *br_port; br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev); if (!br_port) return 0; br_port->flags &= ~flags.mask; br_port->flags |= flags.val & flags.mask; return prestera_br_port_flags_set(br_port, port); } static int prestera_port_attr_br_ageing_set(struct prestera_port *port, unsigned long ageing_clock_t) { unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); u32 ageing_time_ms = jiffies_to_msecs(ageing_jiffies); struct prestera_switch *sw = port->sw; if (ageing_time_ms < PRESTERA_MIN_AGEING_TIME_MS || ageing_time_ms > PRESTERA_MAX_AGEING_TIME_MS) return -ERANGE; return prestera_hw_switch_ageing_set(sw, ageing_time_ms); } static int prestera_port_attr_br_vlan_set(struct prestera_port *port, struct net_device *dev, bool vlan_enabled) { struct prestera_switch *sw = port->sw; struct prestera_bridge *bridge; bridge = prestera_bridge_by_dev(sw->swdev, dev); if (WARN_ON(!bridge)) return -EINVAL; if (bridge->vlan_enabled == vlan_enabled) return 0; netdev_err(bridge->dev, "VLAN filtering can't be changed for existing bridge\n"); return -EINVAL; } static int prestera_port_bridge_vlan_stp_set(struct prestera_port *port, struct prestera_bridge_vlan *br_vlan, u8 state) { struct prestera_port_vlan *port_vlan; list_for_each_entry(port_vlan, &br_vlan->port_vlan_list, br_vlan_head) { if (port_vlan->port != port) continue; return prestera_port_vid_stp_set(port, br_vlan->vid, state); } return 0; } static int prestera_port_attr_stp_state_set(struct prestera_port *port, struct net_device *dev, u8 state) { struct prestera_bridge_port *br_port; struct prestera_bridge_vlan *br_vlan; int 
err; u16 vid; br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev); if (!br_port) return 0; if (!br_port->bridge->vlan_enabled) { vid = br_port->bridge->bridge_id; err = prestera_port_vid_stp_set(port, vid, state); if (err) goto err_port_stp_set; } else { list_for_each_entry(br_vlan, &br_port->vlan_list, head) { err = prestera_port_bridge_vlan_stp_set(port, br_vlan, state); if (err) goto err_port_vlan_stp_set; } } br_port->stp_state = state; return 0; err_port_vlan_stp_set: list_for_each_entry_continue_reverse(br_vlan, &br_port->vlan_list, head) prestera_port_bridge_vlan_stp_set(port, br_vlan, br_port->stp_state); return err; err_port_stp_set: prestera_port_vid_stp_set(port, vid, br_port->stp_state); return err; } static int prestera_br_port_lag_mdb_mc_enable_sync(struct prestera_bridge_port *br_port, bool enabled) { struct prestera_port *pr_port; struct prestera_switch *sw; u16 lag_id; int err; pr_port = prestera_port_dev_lower_find(br_port->dev); if (!pr_port) return 0; sw = pr_port->sw; err = prestera_lag_id(sw, br_port->dev, &lag_id); if (err) return err; list_for_each_entry(pr_port, &sw->port_list, list) { if (pr_port->lag->lag_id == lag_id) { err = prestera_port_mc_flood_set(pr_port, enabled); if (err) return err; } } return 0; } static int prestera_br_mdb_mc_enable_sync(struct prestera_bridge *br_dev) { struct prestera_bridge_port *br_port; struct prestera_port *port; bool enabled; int err; /* if mrouter exists: * - make sure every mrouter receives unreg mcast traffic; * if mrouter doesn't exists: * - make sure every port receives unreg mcast traffic; */ list_for_each_entry(br_port, &br_dev->port_list, head) { if (br_dev->multicast_enabled && br_dev->mrouter_exist) enabled = br_port->mrouter; else enabled = br_port->flags & BR_MCAST_FLOOD; if (netif_is_lag_master(br_port->dev)) { err = prestera_br_port_lag_mdb_mc_enable_sync(br_port, enabled); if (err) return err; continue; } port = prestera_port_dev_lower_find(br_port->dev); if (!port) continue; err = prestera_port_mc_flood_set(port, enabled); if (err) return err; } return 0; } static bool prestera_br_mdb_port_is_member(struct prestera_br_mdb_entry *br_mdb, struct net_device *orig_dev) { struct prestera_br_mdb_port *tmp_port; list_for_each_entry(tmp_port, &br_mdb->br_mdb_port_list, br_mdb_port_node) if (tmp_port->br_port->dev == orig_dev) return true; return false; } static int prestera_mdb_port_add(struct prestera_mdb_entry *mdb, struct net_device *orig_dev, const unsigned char addr[ETH_ALEN], u16 vid) { struct prestera_flood_domain *flood_domain = mdb->flood_domain; int err; if (!prestera_flood_domain_port_find(flood_domain, orig_dev, vid)) { err = prestera_flood_domain_port_create(flood_domain, orig_dev, vid); if (err) return err; } return 0; } /* Sync bridge mdb (software table) with HW table (if MC is enabled). */ static int prestera_br_mdb_sync(struct prestera_bridge *br_dev) { struct prestera_br_mdb_port *br_mdb_port; struct prestera_bridge_port *br_port; struct prestera_br_mdb_entry *br_mdb; struct prestera_mdb_entry *mdb; struct prestera_port *pr_port; int err = 0; if (!br_dev->multicast_enabled) return 0; list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list, br_mdb_entry_node) { mdb = br_mdb->mdb; /* Make sure every port that explicitly been added to the mdb * joins the specified group. 
*/ list_for_each_entry(br_mdb_port, &br_mdb->br_mdb_port_list, br_mdb_port_node) { br_port = br_mdb_port->br_port; pr_port = prestera_port_dev_lower_find(br_port->dev); /* Match only mdb and br_mdb ports that belong to the * same broadcast domain. */ if (br_dev->vlan_enabled && !prestera_port_vlan_by_vid(pr_port, mdb->vid)) continue; /* If port is not in MDB or there's no Mrouter * clear HW mdb. */ if (prestera_br_mdb_port_is_member(br_mdb, br_mdb_port->br_port->dev) && br_dev->mrouter_exist) err = prestera_mdb_port_add(mdb, br_port->dev, mdb->addr, mdb->vid); else prestera_mdb_port_del(mdb, br_port->dev); if (err) return err; } /* Make sure that every mrouter port joins every MC group int * broadcast domain. If it's not an mrouter - it should leave */ list_for_each_entry(br_port, &br_dev->port_list, head) { pr_port = prestera_port_dev_lower_find(br_port->dev); /* Make sure mrouter woudln't receive traffci from * another broadcast domain (e.g. from a vlan, which * mrouter port is not a member of). */ if (br_dev->vlan_enabled && !prestera_port_vlan_by_vid(pr_port, mdb->vid)) continue; if (br_port->mrouter) { err = prestera_mdb_port_add(mdb, br_port->dev, mdb->addr, mdb->vid); if (err) return err; } else if (!br_port->mrouter && !prestera_br_mdb_port_is_member (br_mdb, br_port->dev)) { prestera_mdb_port_del(mdb, br_port->dev); } } } return 0; } static int prestera_mdb_enable_set(struct prestera_br_mdb_entry *br_mdb, bool enable) { int err; if (enable != br_mdb->enabled) { if (enable) err = prestera_hw_mdb_create(br_mdb->mdb); else err = prestera_hw_mdb_destroy(br_mdb->mdb); if (err) return err; br_mdb->enabled = enable; } return 0; } static int prestera_br_mdb_enable_set(struct prestera_bridge *br_dev, bool enable) { struct prestera_br_mdb_entry *br_mdb; int err; list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list, br_mdb_entry_node) { err = prestera_mdb_enable_set(br_mdb, enable); if (err) return err; } return 0; } static int prestera_port_attr_br_mc_disabled_set(struct prestera_port *port, struct net_device *orig_dev, bool mc_disabled) { struct prestera_switch *sw = port->sw; struct prestera_bridge *br_dev; br_dev = prestera_bridge_find(sw, orig_dev); if (!br_dev) return 0; br_dev->multicast_enabled = !mc_disabled; /* There's no point in enabling mdb back if router is missing. */ WARN_ON(prestera_br_mdb_enable_set(br_dev, br_dev->multicast_enabled && br_dev->mrouter_exist)); WARN_ON(prestera_br_mdb_sync(br_dev)); WARN_ON(prestera_br_mdb_mc_enable_sync(br_dev)); return 0; } static bool prestera_bridge_mdb_mc_mrouter_exists(struct prestera_bridge *br_dev) { struct prestera_bridge_port *br_port; list_for_each_entry(br_port, &br_dev->port_list, head) if (br_port->mrouter) return true; return false; } static int prestera_port_attr_mrouter_set(struct prestera_port *port, struct net_device *orig_dev, bool is_port_mrouter) { struct prestera_bridge_port *br_port; struct prestera_bridge *br_dev; br_port = prestera_bridge_port_find(port->sw, orig_dev); if (!br_port) return 0; br_dev = br_port->bridge; br_port->mrouter = is_port_mrouter; br_dev->mrouter_exist = prestera_bridge_mdb_mc_mrouter_exists(br_dev); /* Enable MDB processing if both mrouter exists and mc is enabled. * In case if MC enabled, but there is no mrouter, device would flood * all multicast traffic (even if MDB table is not empty) with the use * of bridge's flood capabilities (without the use of flood_domain). 
*/ WARN_ON(prestera_br_mdb_enable_set(br_dev, br_dev->multicast_enabled && br_dev->mrouter_exist)); WARN_ON(prestera_br_mdb_sync(br_dev)); WARN_ON(prestera_br_mdb_mc_enable_sync(br_dev)); return 0; } static int prestera_port_obj_attr_set(struct net_device *dev, const void *ctx, const struct switchdev_attr *attr, struct netlink_ext_ack *extack) { struct prestera_port *port = netdev_priv(dev); int err = 0; switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_STP_STATE: err = prestera_port_attr_stp_state_set(port, attr->orig_dev, attr->u.stp_state); break; case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: if (attr->u.brport_flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_PORT_LOCKED)) err = -EINVAL; break; case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: err = prestera_port_attr_br_flags_set(port, attr->orig_dev, attr->u.brport_flags); break; case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: err = prestera_port_attr_br_ageing_set(port, attr->u.ageing_time); break; case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: err = prestera_port_attr_br_vlan_set(port, attr->orig_dev, attr->u.vlan_filtering); break; case SWITCHDEV_ATTR_ID_PORT_MROUTER: err = prestera_port_attr_mrouter_set(port, attr->orig_dev, attr->u.mrouter); break; case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: err = prestera_port_attr_br_mc_disabled_set(port, attr->orig_dev, attr->u.mc_disabled); break; default: err = -EOPNOTSUPP; } return err; } static void prestera_fdb_offload_notify(struct prestera_port *port, struct switchdev_notifier_fdb_info *info) { struct switchdev_notifier_fdb_info send_info = {}; send_info.addr = info->addr; send_info.vid = info->vid; send_info.offloaded = true; call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, port->dev, &send_info.info, NULL); } static int prestera_port_fdb_set(struct prestera_port *port, struct switchdev_notifier_fdb_info *fdb_info, bool adding) { struct prestera_switch *sw = port->sw; struct prestera_bridge_port *br_port; struct prestera_bridge *bridge; int err; u16 vid; br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev); if (!br_port) return -EINVAL; bridge = br_port->bridge; if (bridge->vlan_enabled) vid = fdb_info->vid; else vid = bridge->bridge_id; if (adding) err = prestera_fdb_add(port, fdb_info->addr, vid, false); else err = prestera_fdb_del(port, fdb_info->addr, vid); return err; } static void prestera_fdb_event_work(struct work_struct *work) { struct switchdev_notifier_fdb_info *fdb_info; struct prestera_fdb_event_work *swdev_work; struct prestera_port *port; struct net_device *dev; int err; swdev_work = container_of(work, struct prestera_fdb_event_work, work); dev = swdev_work->dev; rtnl_lock(); port = prestera_port_dev_lower_find(dev); if (!port) goto out_unlock; switch (swdev_work->event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: fdb_info = &swdev_work->fdb_info; if (!fdb_info->added_by_user || fdb_info->is_local) break; err = prestera_port_fdb_set(port, fdb_info, true); if (err) break; prestera_fdb_offload_notify(port, fdb_info); break; case SWITCHDEV_FDB_DEL_TO_DEVICE: fdb_info = &swdev_work->fdb_info; prestera_port_fdb_set(port, fdb_info, false); break; } out_unlock: rtnl_unlock(); kfree(swdev_work->fdb_info.addr); kfree(swdev_work); dev_put(dev); } static int prestera_switchdev_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = switchdev_notifier_info_to_dev(ptr); struct switchdev_notifier_fdb_info *fdb_info; struct switchdev_notifier_info *info = ptr; struct prestera_fdb_event_work *swdev_work; struct net_device *upper; int err; if (event == 
SWITCHDEV_PORT_ATTR_SET) { err = switchdev_handle_port_attr_set(dev, ptr, prestera_netdev_check, prestera_port_obj_attr_set); return notifier_from_errno(err); } if (!prestera_netdev_check(dev)) return NOTIFY_DONE; upper = netdev_master_upper_dev_get_rcu(dev); if (!upper) return NOTIFY_DONE; if (!netif_is_bridge_master(upper)) return NOTIFY_DONE; swdev_work = kzalloc(sizeof(*swdev_work), GFP_ATOMIC); if (!swdev_work) return NOTIFY_BAD; swdev_work->event = event; swdev_work->dev = dev; switch (event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: case SWITCHDEV_FDB_DEL_TO_DEVICE: fdb_info = container_of(info, struct switchdev_notifier_fdb_info, info); INIT_WORK(&swdev_work->work, prestera_fdb_event_work); memcpy(&swdev_work->fdb_info, ptr, sizeof(swdev_work->fdb_info)); swdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); if (!swdev_work->fdb_info.addr) goto out_bad; ether_addr_copy((u8 *)swdev_work->fdb_info.addr, fdb_info->addr); dev_hold(dev); break; default: kfree(swdev_work); return NOTIFY_DONE; } queue_work(swdev_wq, &swdev_work->work); return NOTIFY_DONE; out_bad: kfree(swdev_work); return NOTIFY_BAD; } static int prestera_port_vlan_bridge_join(struct prestera_port_vlan *port_vlan, struct prestera_bridge_port *br_port) { struct prestera_port *port = port_vlan->port; struct prestera_bridge_vlan *br_vlan; u16 vid = port_vlan->vid; int err; if (port_vlan->br_port) return 0; err = prestera_br_port_flags_set(br_port, port); if (err) goto err_flags2port_set; err = prestera_port_vid_stp_set(port, vid, br_port->stp_state); if (err) goto err_port_vid_stp_set; br_vlan = prestera_bridge_vlan_by_vid(br_port, vid); if (!br_vlan) { br_vlan = prestera_bridge_vlan_create(br_port, vid); if (!br_vlan) { err = -ENOMEM; goto err_bridge_vlan_get; } } list_add(&port_vlan->br_vlan_head, &br_vlan->port_vlan_list); prestera_bridge_port_get(br_port); port_vlan->br_port = br_port; return 0; err_bridge_vlan_get: prestera_port_vid_stp_set(port, vid, BR_STATE_FORWARDING); err_port_vid_stp_set: prestera_br_port_flags_reset(br_port, port); err_flags2port_set: return err; } static int prestera_bridge_port_vlan_add(struct prestera_port *port, struct prestera_bridge_port *br_port, u16 vid, bool is_untagged, bool is_pvid, struct netlink_ext_ack *extack) { struct prestera_port_vlan *port_vlan; u16 old_pvid = port->pvid; u16 pvid; int err; if (is_pvid) pvid = vid; else pvid = port->pvid == vid ? 0 : port->pvid; port_vlan = prestera_port_vlan_by_vid(port, vid); if (port_vlan && port_vlan->br_port != br_port) return -EEXIST; if (!port_vlan) { port_vlan = prestera_port_vlan_create(port, vid, is_untagged); if (IS_ERR(port_vlan)) return PTR_ERR(port_vlan); } else { err = prestera_hw_vlan_port_set(port, vid, true, is_untagged); if (err) goto err_port_vlan_set; } err = prestera_port_pvid_set(port, pvid); if (err) goto err_port_pvid_set; err = prestera_port_vlan_bridge_join(port_vlan, br_port); if (err) goto err_port_vlan_bridge_join; return 0; err_port_vlan_bridge_join: prestera_port_pvid_set(port, old_pvid); err_port_pvid_set: prestera_hw_vlan_port_set(port, vid, false, false); err_port_vlan_set: prestera_port_vlan_destroy(port_vlan); return err; } static void prestera_bridge_port_vlan_del(struct prestera_port *port, struct prestera_bridge_port *br_port, u16 vid) { u16 pvid = port->pvid == vid ? 
0 : port->pvid; struct prestera_port_vlan *port_vlan; port_vlan = prestera_port_vlan_by_vid(port, vid); if (WARN_ON(!port_vlan)) return; prestera_port_vlan_bridge_leave(port_vlan); prestera_port_pvid_set(port, pvid); prestera_port_vlan_destroy(port_vlan); } static int prestera_port_vlans_add(struct prestera_port *port, const struct switchdev_obj_port_vlan *vlan, struct netlink_ext_ack *extack) { bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; struct net_device *orig_dev = vlan->obj.orig_dev; struct prestera_bridge_port *br_port; struct prestera_switch *sw = port->sw; struct prestera_bridge *bridge; if (netif_is_bridge_master(orig_dev)) return 0; br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev); if (WARN_ON(!br_port)) return -EINVAL; bridge = br_port->bridge; if (!bridge->vlan_enabled) return 0; return prestera_bridge_port_vlan_add(port, br_port, vlan->vid, flag_untagged, flag_pvid, extack); } static struct prestera_br_mdb_entry * prestera_br_mdb_entry_create(struct prestera_switch *sw, struct prestera_bridge *br_dev, const unsigned char *addr, u16 vid) { struct prestera_br_mdb_entry *br_mdb_entry; struct prestera_mdb_entry *mdb_entry; br_mdb_entry = kzalloc(sizeof(*br_mdb_entry), GFP_KERNEL); if (!br_mdb_entry) return NULL; mdb_entry = prestera_mdb_entry_create(sw, addr, vid); if (!mdb_entry) goto err_mdb_alloc; br_mdb_entry->mdb = mdb_entry; br_mdb_entry->bridge = br_dev; br_mdb_entry->enabled = true; INIT_LIST_HEAD(&br_mdb_entry->br_mdb_port_list); list_add(&br_mdb_entry->br_mdb_entry_node, &br_dev->br_mdb_entry_list); return br_mdb_entry; err_mdb_alloc: kfree(br_mdb_entry); return NULL; } static int prestera_br_mdb_port_add(struct prestera_br_mdb_entry *br_mdb, struct prestera_bridge_port *br_port) { struct prestera_br_mdb_port *br_mdb_port; list_for_each_entry(br_mdb_port, &br_mdb->br_mdb_port_list, br_mdb_port_node) if (br_mdb_port->br_port == br_port) return 0; br_mdb_port = kzalloc(sizeof(*br_mdb_port), GFP_KERNEL); if (!br_mdb_port) return -ENOMEM; br_mdb_port->br_port = br_port; list_add(&br_mdb_port->br_mdb_port_node, &br_mdb->br_mdb_port_list); return 0; } static struct prestera_br_mdb_entry * prestera_br_mdb_entry_find(struct prestera_bridge *br_dev, const unsigned char *addr, u16 vid) { struct prestera_br_mdb_entry *br_mdb; list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list, br_mdb_entry_node) if (ether_addr_equal(&br_mdb->mdb->addr[0], addr) && vid == br_mdb->mdb->vid) return br_mdb; return NULL; } static struct prestera_br_mdb_entry * prestera_br_mdb_entry_get(struct prestera_switch *sw, struct prestera_bridge *br_dev, const unsigned char *addr, u16 vid) { struct prestera_br_mdb_entry *br_mdb; br_mdb = prestera_br_mdb_entry_find(br_dev, addr, vid); if (br_mdb) return br_mdb; return prestera_br_mdb_entry_create(sw, br_dev, addr, vid); } static int prestera_mdb_port_addr_obj_add(const struct switchdev_obj_port_mdb *mdb) { struct prestera_br_mdb_entry *br_mdb; struct prestera_bridge_port *br_port; struct prestera_bridge *br_dev; struct prestera_switch *sw; struct prestera_port *port; int err; sw = prestera_switch_get(mdb->obj.orig_dev); port = prestera_port_dev_lower_find(mdb->obj.orig_dev); br_port = prestera_bridge_port_find(sw, mdb->obj.orig_dev); if (!br_port) return 0; br_dev = br_port->bridge; if (mdb->vid && !prestera_port_vlan_by_vid(port, mdb->vid)) return 0; if (mdb->vid) br_mdb = prestera_br_mdb_entry_get(sw, br_dev, &mdb->addr[0], mdb->vid); else br_mdb = prestera_br_mdb_entry_get(sw, 
br_dev, &mdb->addr[0], br_dev->bridge_id); if (!br_mdb) return -ENOMEM; /* Make sure newly allocated MDB entry gets disabled if either MC is * disabled, or the mrouter does not exist. */ WARN_ON(prestera_mdb_enable_set(br_mdb, br_dev->multicast_enabled && br_dev->mrouter_exist)); err = prestera_br_mdb_port_add(br_mdb, br_port); if (err) { prestera_br_mdb_entry_put(br_mdb); return err; } err = prestera_br_mdb_sync(br_dev); if (err) return err; return 0; } static int prestera_port_obj_add(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj, struct netlink_ext_ack *extack) { struct prestera_port *port = netdev_priv(dev); const struct switchdev_obj_port_vlan *vlan; const struct switchdev_obj_port_mdb *mdb; int err = 0; switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); return prestera_port_vlans_add(port, vlan, extack); case SWITCHDEV_OBJ_ID_PORT_MDB: mdb = SWITCHDEV_OBJ_PORT_MDB(obj); err = prestera_mdb_port_addr_obj_add(mdb); break; case SWITCHDEV_OBJ_ID_HOST_MDB: fallthrough; default: err = -EOPNOTSUPP; break; } return err; } static int prestera_port_vlans_del(struct prestera_port *port, const struct switchdev_obj_port_vlan *vlan) { struct net_device *orig_dev = vlan->obj.orig_dev; struct prestera_bridge_port *br_port; struct prestera_switch *sw = port->sw; if (netif_is_bridge_master(orig_dev)) return -EOPNOTSUPP; br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev); if (WARN_ON(!br_port)) return -EINVAL; if (!br_port->bridge->vlan_enabled) return 0; prestera_bridge_port_vlan_del(port, br_port, vlan->vid); return 0; } static int prestera_mdb_port_addr_obj_del(struct prestera_port *port, const struct switchdev_obj_port_mdb *mdb) { struct prestera_br_mdb_entry *br_mdb; struct prestera_bridge_port *br_port; struct prestera_bridge *br_dev; int err; /* Bridge port no longer exists - and so does this MDB entry */ br_port = prestera_bridge_port_find(port->sw, mdb->obj.orig_dev); if (!br_port) return 0; /* Removing MDB with non-existing VLAN - not supported; */ if (mdb->vid && !prestera_port_vlan_by_vid(port, mdb->vid)) return 0; br_dev = br_port->bridge; if (br_port->bridge->vlan_enabled) br_mdb = prestera_br_mdb_entry_find(br_dev, &mdb->addr[0], mdb->vid); else br_mdb = prestera_br_mdb_entry_find(br_dev, &mdb->addr[0], br_port->bridge->bridge_id); if (!br_mdb) return 0; /* Since there might be a situation that this port was the last in the * MDB group, we have to both remove this port from software and HW MDB, * sync MDB table, and then destroy software MDB (if needed). 
*/ prestera_br_mdb_port_del(br_mdb, br_port); prestera_br_mdb_entry_put(br_mdb); err = prestera_br_mdb_sync(br_dev); if (err) return err; return 0; } static int prestera_port_obj_del(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj) { struct prestera_port *port = netdev_priv(dev); const struct switchdev_obj_port_mdb *mdb; int err = 0; switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: return prestera_port_vlans_del(port, SWITCHDEV_OBJ_PORT_VLAN(obj)); case SWITCHDEV_OBJ_ID_PORT_MDB: mdb = SWITCHDEV_OBJ_PORT_MDB(obj); err = prestera_mdb_port_addr_obj_del(port, mdb); break; default: err = -EOPNOTSUPP; break; } return err; } static int prestera_switchdev_blk_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = switchdev_notifier_info_to_dev(ptr); int err; switch (event) { case SWITCHDEV_PORT_OBJ_ADD: err = switchdev_handle_port_obj_add(dev, ptr, prestera_netdev_check, prestera_port_obj_add); break; case SWITCHDEV_PORT_OBJ_DEL: err = switchdev_handle_port_obj_del(dev, ptr, prestera_netdev_check, prestera_port_obj_del); break; case SWITCHDEV_PORT_ATTR_SET: err = switchdev_handle_port_attr_set(dev, ptr, prestera_netdev_check, prestera_port_obj_attr_set); break; default: return NOTIFY_DONE; } return notifier_from_errno(err); } static void prestera_fdb_event(struct prestera_switch *sw, struct prestera_event *evt, void *arg) { struct switchdev_notifier_fdb_info info = {}; struct net_device *dev = NULL; struct prestera_port *port; struct prestera_lag *lag; switch (evt->fdb_evt.type) { case PRESTERA_FDB_ENTRY_TYPE_REG_PORT: port = prestera_find_port(sw, evt->fdb_evt.dest.port_id); if (port) dev = port->dev; break; case PRESTERA_FDB_ENTRY_TYPE_LAG: lag = prestera_lag_by_id(sw, evt->fdb_evt.dest.lag_id); if (lag) dev = lag->dev; break; default: return; } if (!dev) return; info.addr = evt->fdb_evt.data.mac; info.vid = evt->fdb_evt.vid; info.offloaded = true; rtnl_lock(); switch (evt->id) { case PRESTERA_FDB_EVENT_LEARNED: call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, dev, &info.info, NULL); break; case PRESTERA_FDB_EVENT_AGED: call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info, NULL); break; } rtnl_unlock(); } static int prestera_fdb_init(struct prestera_switch *sw) { int err; err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_FDB, prestera_fdb_event, NULL); if (err) return err; err = prestera_hw_switch_ageing_set(sw, PRESTERA_DEFAULT_AGEING_TIME_MS); if (err) goto err_ageing_set; return 0; err_ageing_set: prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_FDB, prestera_fdb_event); return err; } static void prestera_fdb_fini(struct prestera_switch *sw) { prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_FDB, prestera_fdb_event); } static int prestera_switchdev_handler_init(struct prestera_switchdev *swdev) { int err; swdev->swdev_nb.notifier_call = prestera_switchdev_event; err = register_switchdev_notifier(&swdev->swdev_nb); if (err) goto err_register_swdev_notifier; swdev->swdev_nb_blk.notifier_call = prestera_switchdev_blk_event; err = register_switchdev_blocking_notifier(&swdev->swdev_nb_blk); if (err) goto err_register_blk_swdev_notifier; return 0; err_register_blk_swdev_notifier: unregister_switchdev_notifier(&swdev->swdev_nb); err_register_swdev_notifier: destroy_workqueue(swdev_wq); return err; } static void prestera_switchdev_handler_fini(struct prestera_switchdev *swdev) { unregister_switchdev_blocking_notifier(&swdev->swdev_nb_blk); 
unregister_switchdev_notifier(&swdev->swdev_nb); } int prestera_switchdev_init(struct prestera_switch *sw) { struct prestera_switchdev *swdev; int err; swdev = kzalloc(sizeof(*swdev), GFP_KERNEL); if (!swdev) return -ENOMEM; sw->swdev = swdev; swdev->sw = sw; INIT_LIST_HEAD(&swdev->bridge_list); swdev_wq = alloc_ordered_workqueue("%s_ordered", 0, "prestera_br"); if (!swdev_wq) { err = -ENOMEM; goto err_alloc_wq; } err = prestera_switchdev_handler_init(swdev); if (err) goto err_swdev_init; err = prestera_fdb_init(sw); if (err) goto err_fdb_init; return 0; err_fdb_init: err_swdev_init: destroy_workqueue(swdev_wq); err_alloc_wq: kfree(swdev); return err; } void prestera_switchdev_fini(struct prestera_switch *sw) { struct prestera_switchdev *swdev = sw->swdev; prestera_fdb_fini(sw); prestera_switchdev_handler_fini(swdev); destroy_workqueue(swdev_wq); kfree(swdev); }
linux-master
drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
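The switchdev code above programs a hardware MDB entry only when bridge multicast is enabled and an mrouter port exists; otherwise multicast traffic falls back to plain bridge flooding. A minimal standalone C sketch of that gating decision follows (hypothetical names, not part of the driver):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical mirror of the bridge state consulted before enabling a HW MDB entry. */
struct br_state {
	bool multicast_enabled;
	bool mrouter_exist;
};

/* Returns true when a HW MDB entry should be programmed; false means the
 * switch relies on plain multicast flooding instead.
 */
static bool mdb_should_be_enabled(const struct br_state *br)
{
	return br->multicast_enabled && br->mrouter_exist;
}

int main(void)
{
	struct br_state br = { .multicast_enabled = true, .mrouter_exist = false };

	printf("HW MDB enabled: %d\n", mdb_should_be_enabled(&br)); /* 0: flood */
	br.mrouter_exist = true;
	printf("HW MDB enabled: %d\n", mdb_should_be_enabled(&br)); /* 1: program MDB */
	return 0;
}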
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */ #include <linux/rhashtable.h> #include "prestera.h" #include "prestera_hw.h" #include "prestera_router_hw.h" #include "prestera_acl.h" /* Nexthop is pointed * to port (not rif) * +-------+ * +>|nexthop| * | +-------+ * | * +--+ +-----++ * +------->|vr|<-+ +>|nh_grp| * | +--+ | | +------+ * | | | * +-+-------+ +--+---+-+ * |rif_entry| |fib_node| * +---------+ +--------+ * Rif is Fib - is exit point * used as * entry point * for vr in hw */ #define PRESTERA_NHGR_UNUSED (0) #define PRESTERA_NHGR_DROP (0xFFFFFFFF) /* Need to merge it with router_manager */ #define PRESTERA_NH_ACTIVE_JIFFER_FILTER 3000 /* ms */ static const struct rhashtable_params __prestera_fib_ht_params = { .key_offset = offsetof(struct prestera_fib_node, key), .head_offset = offsetof(struct prestera_fib_node, ht_node), .key_len = sizeof(struct prestera_fib_key), .automatic_shrinking = true, }; static const struct rhashtable_params __prestera_nh_neigh_ht_params = { .key_offset = offsetof(struct prestera_nh_neigh, key), .key_len = sizeof(struct prestera_nh_neigh_key), .head_offset = offsetof(struct prestera_nh_neigh, ht_node), }; static const struct rhashtable_params __prestera_nexthop_group_ht_params = { .key_offset = offsetof(struct prestera_nexthop_group, key), .key_len = sizeof(struct prestera_nexthop_group_key), .head_offset = offsetof(struct prestera_nexthop_group, ht_node), }; static int prestera_nexthop_group_set(struct prestera_switch *sw, struct prestera_nexthop_group *nh_grp); static bool prestera_nexthop_group_util_hw_state(struct prestera_switch *sw, struct prestera_nexthop_group *nh_grp); static void prestera_fib_node_destroy_ht_cb(void *ptr, void *arg); /* TODO: move to router.h as macros */ static bool prestera_nh_neigh_key_is_valid(struct prestera_nh_neigh_key *key) { return memchr_inv(key, 0, sizeof(*key)) ? 
true : false; } int prestera_router_hw_init(struct prestera_switch *sw) { int err; err = rhashtable_init(&sw->router->nh_neigh_ht, &__prestera_nh_neigh_ht_params); if (err) goto err_nh_neigh_ht_init; err = rhashtable_init(&sw->router->nexthop_group_ht, &__prestera_nexthop_group_ht_params); if (err) goto err_nexthop_grp_ht_init; err = rhashtable_init(&sw->router->fib_ht, &__prestera_fib_ht_params); if (err) goto err_fib_ht_init; INIT_LIST_HEAD(&sw->router->vr_list); INIT_LIST_HEAD(&sw->router->rif_entry_list); return 0; err_fib_ht_init: rhashtable_destroy(&sw->router->nexthop_group_ht); err_nexthop_grp_ht_init: rhashtable_destroy(&sw->router->nh_neigh_ht); err_nh_neigh_ht_init: return 0; } void prestera_router_hw_fini(struct prestera_switch *sw) { rhashtable_free_and_destroy(&sw->router->fib_ht, prestera_fib_node_destroy_ht_cb, sw); WARN_ON(!list_empty(&sw->router->vr_list)); WARN_ON(!list_empty(&sw->router->rif_entry_list)); rhashtable_destroy(&sw->router->fib_ht); rhashtable_destroy(&sw->router->nexthop_group_ht); rhashtable_destroy(&sw->router->nh_neigh_ht); } static struct prestera_vr *__prestera_vr_find(struct prestera_switch *sw, u32 tb_id) { struct prestera_vr *vr; list_for_each_entry(vr, &sw->router->vr_list, router_node) { if (vr->tb_id == tb_id) return vr; } return NULL; } static struct prestera_vr *__prestera_vr_create(struct prestera_switch *sw, u32 tb_id, struct netlink_ext_ack *extack) { struct prestera_vr *vr; int err; vr = kzalloc(sizeof(*vr), GFP_KERNEL); if (!vr) { err = -ENOMEM; goto err_alloc_vr; } vr->tb_id = tb_id; err = prestera_hw_vr_create(sw, &vr->hw_vr_id); if (err) goto err_hw_create; list_add(&vr->router_node, &sw->router->vr_list); return vr; err_hw_create: kfree(vr); err_alloc_vr: return ERR_PTR(err); } static void __prestera_vr_destroy(struct prestera_switch *sw, struct prestera_vr *vr) { list_del(&vr->router_node); prestera_hw_vr_delete(sw, vr->hw_vr_id); kfree(vr); } static struct prestera_vr *prestera_vr_get(struct prestera_switch *sw, u32 tb_id, struct netlink_ext_ack *extack) { struct prestera_vr *vr; vr = __prestera_vr_find(sw, tb_id); if (vr) { refcount_inc(&vr->refcount); } else { vr = __prestera_vr_create(sw, tb_id, extack); if (IS_ERR(vr)) return ERR_CAST(vr); refcount_set(&vr->refcount, 1); } return vr; } static void prestera_vr_put(struct prestera_switch *sw, struct prestera_vr *vr) { if (refcount_dec_and_test(&vr->refcount)) __prestera_vr_destroy(sw, vr); } /* iface is overhead struct. vr_id also can be removed. 
*/ static int __prestera_rif_entry_key_copy(const struct prestera_rif_entry_key *in, struct prestera_rif_entry_key *out) { memset(out, 0, sizeof(*out)); switch (in->iface.type) { case PRESTERA_IF_PORT_E: out->iface.dev_port.hw_dev_num = in->iface.dev_port.hw_dev_num; out->iface.dev_port.port_num = in->iface.dev_port.port_num; break; case PRESTERA_IF_LAG_E: out->iface.lag_id = in->iface.lag_id; break; case PRESTERA_IF_VID_E: out->iface.vlan_id = in->iface.vlan_id; break; default: WARN(1, "Unsupported iface type"); return -EINVAL; } out->iface.type = in->iface.type; return 0; } struct prestera_rif_entry * prestera_rif_entry_find(const struct prestera_switch *sw, const struct prestera_rif_entry_key *k) { struct prestera_rif_entry *rif_entry; struct prestera_rif_entry_key lk; /* lookup key */ if (__prestera_rif_entry_key_copy(k, &lk)) return NULL; list_for_each_entry(rif_entry, &sw->router->rif_entry_list, router_node) { if (!memcmp(k, &rif_entry->key, sizeof(*k))) return rif_entry; } return NULL; } void prestera_rif_entry_destroy(struct prestera_switch *sw, struct prestera_rif_entry *e) { struct prestera_iface iface; list_del(&e->router_node); memcpy(&iface, &e->key.iface, sizeof(iface)); iface.vr_id = e->vr->hw_vr_id; prestera_hw_rif_delete(sw, e->hw_id, &iface); prestera_vr_put(sw, e->vr); kfree(e); } struct prestera_rif_entry * prestera_rif_entry_create(struct prestera_switch *sw, struct prestera_rif_entry_key *k, u32 tb_id, const unsigned char *addr) { int err; struct prestera_rif_entry *e; struct prestera_iface iface; e = kzalloc(sizeof(*e), GFP_KERNEL); if (!e) goto err_kzalloc; if (__prestera_rif_entry_key_copy(k, &e->key)) goto err_key_copy; e->vr = prestera_vr_get(sw, tb_id, NULL); if (IS_ERR(e->vr)) goto err_vr_get; memcpy(&e->addr, addr, sizeof(e->addr)); /* HW */ memcpy(&iface, &e->key.iface, sizeof(iface)); iface.vr_id = e->vr->hw_vr_id; err = prestera_hw_rif_create(sw, &iface, e->addr, &e->hw_id); if (err) goto err_hw_create; list_add(&e->router_node, &sw->router->rif_entry_list); return e; err_hw_create: prestera_vr_put(sw, e->vr); err_vr_get: err_key_copy: kfree(e); err_kzalloc: return NULL; } static void __prestera_nh_neigh_destroy(struct prestera_switch *sw, struct prestera_nh_neigh *neigh) { rhashtable_remove_fast(&sw->router->nh_neigh_ht, &neigh->ht_node, __prestera_nh_neigh_ht_params); kfree(neigh); } static struct prestera_nh_neigh * __prestera_nh_neigh_create(struct prestera_switch *sw, struct prestera_nh_neigh_key *key) { struct prestera_nh_neigh *neigh; int err; neigh = kzalloc(sizeof(*neigh), GFP_KERNEL); if (!neigh) goto err_kzalloc; memcpy(&neigh->key, key, sizeof(*key)); neigh->info.connected = false; INIT_LIST_HEAD(&neigh->nexthop_group_list); err = rhashtable_insert_fast(&sw->router->nh_neigh_ht, &neigh->ht_node, __prestera_nh_neigh_ht_params); if (err) goto err_rhashtable_insert; return neigh; err_rhashtable_insert: kfree(neigh); err_kzalloc: return NULL; } struct prestera_nh_neigh * prestera_nh_neigh_find(struct prestera_switch *sw, struct prestera_nh_neigh_key *key) { struct prestera_nh_neigh *nh_neigh; nh_neigh = rhashtable_lookup_fast(&sw->router->nh_neigh_ht, key, __prestera_nh_neigh_ht_params); return nh_neigh; } struct prestera_nh_neigh * prestera_nh_neigh_get(struct prestera_switch *sw, struct prestera_nh_neigh_key *key) { struct prestera_nh_neigh *neigh; neigh = prestera_nh_neigh_find(sw, key); if (!neigh) return __prestera_nh_neigh_create(sw, key); return neigh; } void prestera_nh_neigh_put(struct prestera_switch *sw, struct prestera_nh_neigh *neigh) 
{ if (list_empty(&neigh->nexthop_group_list)) __prestera_nh_neigh_destroy(sw, neigh); } /* Updates new prestera_neigh_info */ int prestera_nh_neigh_set(struct prestera_switch *sw, struct prestera_nh_neigh *neigh) { struct prestera_nh_neigh_head *nh_head; struct prestera_nexthop_group *nh_grp; int err; list_for_each_entry(nh_head, &neigh->nexthop_group_list, head) { nh_grp = nh_head->this; err = prestera_nexthop_group_set(sw, nh_grp); if (err) return err; } return 0; } bool prestera_nh_neigh_util_hw_state(struct prestera_switch *sw, struct prestera_nh_neigh *nh_neigh) { bool state; struct prestera_nh_neigh_head *nh_head, *tmp; state = false; list_for_each_entry_safe(nh_head, tmp, &nh_neigh->nexthop_group_list, head) { state = prestera_nexthop_group_util_hw_state(sw, nh_head->this); if (state) goto out; } out: return state; } static struct prestera_nexthop_group * __prestera_nexthop_group_create(struct prestera_switch *sw, struct prestera_nexthop_group_key *key) { struct prestera_nexthop_group *nh_grp; struct prestera_nh_neigh *nh_neigh; int nh_cnt, err, gid; nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL); if (!nh_grp) goto err_kzalloc; memcpy(&nh_grp->key, key, sizeof(*key)); for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) { if (!prestera_nh_neigh_key_is_valid(&nh_grp->key.neigh[nh_cnt])) break; nh_neigh = prestera_nh_neigh_get(sw, &nh_grp->key.neigh[nh_cnt]); if (!nh_neigh) goto err_nh_neigh_get; nh_grp->nh_neigh_head[nh_cnt].neigh = nh_neigh; nh_grp->nh_neigh_head[nh_cnt].this = nh_grp; list_add(&nh_grp->nh_neigh_head[nh_cnt].head, &nh_neigh->nexthop_group_list); } err = prestera_hw_nh_group_create(sw, nh_cnt, &nh_grp->grp_id); if (err) goto err_nh_group_create; err = prestera_nexthop_group_set(sw, nh_grp); if (err) goto err_nexthop_group_set; err = rhashtable_insert_fast(&sw->router->nexthop_group_ht, &nh_grp->ht_node, __prestera_nexthop_group_ht_params); if (err) goto err_ht_insert; /* reset cache for created group */ gid = nh_grp->grp_id; sw->router->nhgrp_hw_state_cache[gid / 8] &= ~BIT(gid % 8); return nh_grp; err_ht_insert: err_nexthop_group_set: prestera_hw_nh_group_delete(sw, nh_cnt, nh_grp->grp_id); err_nh_group_create: err_nh_neigh_get: for (nh_cnt--; nh_cnt >= 0; nh_cnt--) { list_del(&nh_grp->nh_neigh_head[nh_cnt].head); prestera_nh_neigh_put(sw, nh_grp->nh_neigh_head[nh_cnt].neigh); } kfree(nh_grp); err_kzalloc: return NULL; } static void __prestera_nexthop_group_destroy(struct prestera_switch *sw, struct prestera_nexthop_group *nh_grp) { struct prestera_nh_neigh *nh_neigh; int nh_cnt; rhashtable_remove_fast(&sw->router->nexthop_group_ht, &nh_grp->ht_node, __prestera_nexthop_group_ht_params); for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) { nh_neigh = nh_grp->nh_neigh_head[nh_cnt].neigh; if (!nh_neigh) break; list_del(&nh_grp->nh_neigh_head[nh_cnt].head); prestera_nh_neigh_put(sw, nh_neigh); } prestera_hw_nh_group_delete(sw, nh_cnt, nh_grp->grp_id); kfree(nh_grp); } static struct prestera_nexthop_group * __prestera_nexthop_group_find(struct prestera_switch *sw, struct prestera_nexthop_group_key *key) { struct prestera_nexthop_group *nh_grp; nh_grp = rhashtable_lookup_fast(&sw->router->nexthop_group_ht, key, __prestera_nexthop_group_ht_params); return nh_grp; } static struct prestera_nexthop_group * prestera_nexthop_group_get(struct prestera_switch *sw, struct prestera_nexthop_group_key *key) { struct prestera_nexthop_group *nh_grp; nh_grp = __prestera_nexthop_group_find(sw, key); if (nh_grp) { refcount_inc(&nh_grp->refcount); } else { nh_grp = 
__prestera_nexthop_group_create(sw, key); if (!nh_grp) return ERR_PTR(-ENOMEM); refcount_set(&nh_grp->refcount, 1); } return nh_grp; } static void prestera_nexthop_group_put(struct prestera_switch *sw, struct prestera_nexthop_group *nh_grp) { if (refcount_dec_and_test(&nh_grp->refcount)) __prestera_nexthop_group_destroy(sw, nh_grp); } /* Updates with new nh_neigh's info */ static int prestera_nexthop_group_set(struct prestera_switch *sw, struct prestera_nexthop_group *nh_grp) { struct prestera_neigh_info info[PRESTERA_NHGR_SIZE_MAX]; struct prestera_nh_neigh *neigh; int nh_cnt; memset(&info[0], 0, sizeof(info)); for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) { neigh = nh_grp->nh_neigh_head[nh_cnt].neigh; if (!neigh) break; memcpy(&info[nh_cnt], &neigh->info, sizeof(neigh->info)); } return prestera_hw_nh_entries_set(sw, nh_cnt, &info[0], nh_grp->grp_id); } static bool prestera_nexthop_group_util_hw_state(struct prestera_switch *sw, struct prestera_nexthop_group *nh_grp) { int err; u32 buf_size = sw->size_tbl_router_nexthop / 8 + 1; u32 gid = nh_grp->grp_id; u8 *cache = sw->router->nhgrp_hw_state_cache; /* Antijitter * Prevent situation, when we read state of nh_grp twice in short time, * and state bit is still cleared on second call. So just stuck active * state for PRESTERA_NH_ACTIVE_JIFFER_FILTER, after last occurred. */ if (!time_before(jiffies, sw->router->nhgrp_hw_cache_kick + msecs_to_jiffies(PRESTERA_NH_ACTIVE_JIFFER_FILTER))) { err = prestera_hw_nhgrp_blk_get(sw, cache, buf_size); if (err) { pr_err("Failed to get hw state nh_grp's"); return false; } sw->router->nhgrp_hw_cache_kick = jiffies; } if (cache[gid / 8] & BIT(gid % 8)) return true; return false; } struct prestera_fib_node * prestera_fib_node_find(struct prestera_switch *sw, struct prestera_fib_key *key) { struct prestera_fib_node *fib_node; fib_node = rhashtable_lookup_fast(&sw->router->fib_ht, key, __prestera_fib_ht_params); return fib_node; } static void __prestera_fib_node_destruct(struct prestera_switch *sw, struct prestera_fib_node *fib_node) { struct prestera_vr *vr; vr = fib_node->info.vr; prestera_hw_lpm_del(sw, vr->hw_vr_id, fib_node->key.addr.u.ipv4, fib_node->key.prefix_len); switch (fib_node->info.type) { case PRESTERA_FIB_TYPE_UC_NH: prestera_nexthop_group_put(sw, fib_node->info.nh_grp); break; case PRESTERA_FIB_TYPE_TRAP: break; case PRESTERA_FIB_TYPE_DROP: break; default: pr_err("Unknown fib_node->info.type = %d", fib_node->info.type); } prestera_vr_put(sw, vr); } void prestera_fib_node_destroy(struct prestera_switch *sw, struct prestera_fib_node *fib_node) { __prestera_fib_node_destruct(sw, fib_node); rhashtable_remove_fast(&sw->router->fib_ht, &fib_node->ht_node, __prestera_fib_ht_params); kfree(fib_node); } static void prestera_fib_node_destroy_ht_cb(void *ptr, void *arg) { struct prestera_fib_node *node = ptr; struct prestera_switch *sw = arg; __prestera_fib_node_destruct(sw, node); kfree(node); } struct prestera_fib_node * prestera_fib_node_create(struct prestera_switch *sw, struct prestera_fib_key *key, enum prestera_fib_type fib_type, struct prestera_nexthop_group_key *nh_grp_key) { struct prestera_fib_node *fib_node; u32 grp_id; struct prestera_vr *vr; int err; fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL); if (!fib_node) goto err_kzalloc; memcpy(&fib_node->key, key, sizeof(*key)); fib_node->info.type = fib_type; vr = prestera_vr_get(sw, key->tb_id, NULL); if (IS_ERR(vr)) goto err_vr_get; fib_node->info.vr = vr; switch (fib_type) { case PRESTERA_FIB_TYPE_TRAP: grp_id = 
PRESTERA_NHGR_UNUSED; break; case PRESTERA_FIB_TYPE_DROP: grp_id = PRESTERA_NHGR_DROP; break; case PRESTERA_FIB_TYPE_UC_NH: fib_node->info.nh_grp = prestera_nexthop_group_get(sw, nh_grp_key); if (IS_ERR(fib_node->info.nh_grp)) goto err_nh_grp_get; grp_id = fib_node->info.nh_grp->grp_id; break; default: pr_err("Unsupported fib_type %d", fib_type); goto err_nh_grp_get; } err = prestera_hw_lpm_add(sw, vr->hw_vr_id, key->addr.u.ipv4, key->prefix_len, grp_id); if (err) goto err_lpm_add; err = rhashtable_insert_fast(&sw->router->fib_ht, &fib_node->ht_node, __prestera_fib_ht_params); if (err) goto err_ht_insert; return fib_node; err_ht_insert: prestera_hw_lpm_del(sw, vr->hw_vr_id, key->addr.u.ipv4, key->prefix_len); err_lpm_add: if (fib_type == PRESTERA_FIB_TYPE_UC_NH) prestera_nexthop_group_put(sw, fib_node->info.nh_grp); err_nh_grp_get: prestera_vr_put(sw, vr); err_vr_get: kfree(fib_node); err_kzalloc: return NULL; }
linux-master
drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
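The router code above manages virtual routers and nexthop groups with a get/put refcount discipline: get() either finds an existing object and bumps its refcount or creates it with a refcount of 1, and put() destroys the object when the count drops to zero. A standalone C sketch of that pattern (hypothetical names, not driver code):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a refcounted router object: an id plus a refcount. */
struct vr {
	unsigned int tb_id;
	unsigned int refcount;
};

static struct vr *cached; /* pretend "lookup table" with a single slot */

static struct vr *vr_get(unsigned int tb_id)
{
	if (cached && cached->tb_id == tb_id) {
		cached->refcount++;		/* existing entry: just take a reference */
		return cached;
	}
	cached = calloc(1, sizeof(*cached));	/* create path: refcount starts at 1 */
	if (!cached)
		return NULL;
	cached->tb_id = tb_id;
	cached->refcount = 1;
	return cached;
}

static void vr_put(struct vr *vr)
{
	if (--vr->refcount == 0) {		/* last user destroys the object */
		free(vr);
		cached = NULL;
	}
}

int main(void)
{
	struct vr *a = vr_get(10);
	struct vr *b = vr_get(10);	/* same table id: shares the object */

	printf("refcount=%u shared=%d\n", a->refcount, a == b);
	vr_put(b);
	vr_put(a);			/* object is freed here */
	return 0;
}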
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */ #include <linux/ethtool.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include "prestera_ethtool.h" #include "prestera.h" #include "prestera_hw.h" #define PRESTERA_STATS_CNT \ (sizeof(struct prestera_port_stats) / sizeof(u64)) #define PRESTERA_STATS_IDX(name) \ (offsetof(struct prestera_port_stats, name) / sizeof(u64)) #define PRESTERA_STATS_FIELD(name) \ [PRESTERA_STATS_IDX(name)] = __stringify(name) static const char driver_kind[] = "prestera"; static const struct prestera_link_mode { enum ethtool_link_mode_bit_indices eth_mode; u32 speed; u64 pr_mask; u8 duplex; u8 port_type; } port_link_modes[PRESTERA_LINK_MODE_MAX] = { [PRESTERA_LINK_MODE_10baseT_Half] = { .eth_mode = ETHTOOL_LINK_MODE_10baseT_Half_BIT, .speed = 10, .pr_mask = 1 << PRESTERA_LINK_MODE_10baseT_Half, .duplex = PRESTERA_PORT_DUPLEX_HALF, .port_type = PRESTERA_PORT_TYPE_TP, }, [PRESTERA_LINK_MODE_10baseT_Full] = { .eth_mode = ETHTOOL_LINK_MODE_10baseT_Full_BIT, .speed = 10, .pr_mask = 1 << PRESTERA_LINK_MODE_10baseT_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, .port_type = PRESTERA_PORT_TYPE_TP, }, [PRESTERA_LINK_MODE_100baseT_Half] = { .eth_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT, .speed = 100, .pr_mask = 1 << PRESTERA_LINK_MODE_100baseT_Half, .duplex = PRESTERA_PORT_DUPLEX_HALF, .port_type = PRESTERA_PORT_TYPE_TP, }, [PRESTERA_LINK_MODE_100baseT_Full] = { .eth_mode = ETHTOOL_LINK_MODE_100baseT_Full_BIT, .speed = 100, .pr_mask = 1 << PRESTERA_LINK_MODE_100baseT_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, .port_type = PRESTERA_PORT_TYPE_TP, }, [PRESTERA_LINK_MODE_1000baseT_Half] = { .eth_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT, .speed = 1000, .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseT_Half, .duplex = PRESTERA_PORT_DUPLEX_HALF, .port_type = PRESTERA_PORT_TYPE_TP, }, [PRESTERA_LINK_MODE_1000baseT_Full] = { .eth_mode = ETHTOOL_LINK_MODE_1000baseT_Full_BIT, .speed = 1000, .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseT_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, .port_type = PRESTERA_PORT_TYPE_TP, }, [PRESTERA_LINK_MODE_1000baseX_Full] = { .eth_mode = ETHTOOL_LINK_MODE_1000baseX_Full_BIT, .speed = 1000, .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseX_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, .port_type = PRESTERA_PORT_TYPE_FIBRE, }, [PRESTERA_LINK_MODE_1000baseKX_Full] = { .eth_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, .speed = 1000, .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseKX_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, .port_type = PRESTERA_PORT_TYPE_TP, }, [PRESTERA_LINK_MODE_2500baseX_Full] = { .eth_mode = ETHTOOL_LINK_MODE_2500baseX_Full_BIT, .speed = 2500, .pr_mask = 1 << PRESTERA_LINK_MODE_2500baseX_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, }, [PRESTERA_LINK_MODE_10GbaseKR_Full] = { .eth_mode = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, .speed = 10000, .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseKR_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, .port_type = PRESTERA_PORT_TYPE_TP, }, [PRESTERA_LINK_MODE_10GbaseSR_Full] = { .eth_mode = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, .speed = 10000, .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseSR_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, .port_type = PRESTERA_PORT_TYPE_FIBRE, }, [PRESTERA_LINK_MODE_10GbaseLR_Full] = { .eth_mode = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, .speed = 10000, .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseLR_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, .port_type = PRESTERA_PORT_TYPE_FIBRE, }, [PRESTERA_LINK_MODE_20GbaseKR2_Full] 
= { .eth_mode = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, .speed = 20000, .pr_mask = 1 << PRESTERA_LINK_MODE_20GbaseKR2_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, .port_type = PRESTERA_PORT_TYPE_TP, }, [PRESTERA_LINK_MODE_25GbaseCR_Full] = { .eth_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, .speed = 25000, .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseCR_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, .port_type = PRESTERA_PORT_TYPE_DA, }, [PRESTERA_LINK_MODE_25GbaseKR_Full] = { .eth_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, .speed = 25000, .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseKR_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, .port_type = PRESTERA_PORT_TYPE_TP, }, [PRESTERA_LINK_MODE_25GbaseSR_Full] = { .eth_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, .speed = 25000, .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseSR_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, .port_type = PRESTERA_PORT_TYPE_FIBRE, }, [PRESTERA_LINK_MODE_40GbaseKR4_Full] = { .eth_mode = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, .speed = 40000, .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseKR4_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, .port_type = PRESTERA_PORT_TYPE_TP, }, [PRESTERA_LINK_MODE_40GbaseCR4_Full] = { .eth_mode = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, .speed = 40000, .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseCR4_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, .port_type = PRESTERA_PORT_TYPE_DA, }, [PRESTERA_LINK_MODE_40GbaseSR4_Full] = { .eth_mode = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, .speed = 40000, .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseSR4_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, .port_type = PRESTERA_PORT_TYPE_FIBRE, }, [PRESTERA_LINK_MODE_50GbaseCR2_Full] = { .eth_mode = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, .speed = 50000, .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseCR2_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, .port_type = PRESTERA_PORT_TYPE_DA, }, [PRESTERA_LINK_MODE_50GbaseKR2_Full] = { .eth_mode = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, .speed = 50000, .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseKR2_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, .port_type = PRESTERA_PORT_TYPE_TP, }, [PRESTERA_LINK_MODE_50GbaseSR2_Full] = { .eth_mode = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, .speed = 50000, .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseSR2_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, .port_type = PRESTERA_PORT_TYPE_FIBRE, }, [PRESTERA_LINK_MODE_100GbaseKR4_Full] = { .eth_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, .speed = 100000, .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseKR4_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, .port_type = PRESTERA_PORT_TYPE_TP, }, [PRESTERA_LINK_MODE_100GbaseSR4_Full] = { .eth_mode = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, .speed = 100000, .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseSR4_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, .port_type = PRESTERA_PORT_TYPE_FIBRE, }, [PRESTERA_LINK_MODE_100GbaseCR4_Full] = { .eth_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, .speed = 100000, .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseCR4_Full, .duplex = PRESTERA_PORT_DUPLEX_FULL, .port_type = PRESTERA_PORT_TYPE_DA, } }; static const struct prestera_fec { u32 eth_fec; enum ethtool_link_mode_bit_indices eth_mode; u8 pr_fec; } port_fec_caps[PRESTERA_PORT_FEC_MAX] = { [PRESTERA_PORT_FEC_OFF] = { .eth_fec = ETHTOOL_FEC_OFF, .eth_mode = ETHTOOL_LINK_MODE_FEC_NONE_BIT, .pr_fec = 1 << PRESTERA_PORT_FEC_OFF, }, [PRESTERA_PORT_FEC_BASER] = { .eth_fec = ETHTOOL_FEC_BASER, .eth_mode = ETHTOOL_LINK_MODE_FEC_BASER_BIT, .pr_fec = 1 << PRESTERA_PORT_FEC_BASER, }, [PRESTERA_PORT_FEC_RS] = { .eth_fec = 
ETHTOOL_FEC_RS, .eth_mode = ETHTOOL_LINK_MODE_FEC_RS_BIT, .pr_fec = 1 << PRESTERA_PORT_FEC_RS, } }; static const struct prestera_port_type { enum ethtool_link_mode_bit_indices eth_mode; u8 eth_type; } port_types[PRESTERA_PORT_TYPE_MAX] = { [PRESTERA_PORT_TYPE_NONE] = { .eth_mode = __ETHTOOL_LINK_MODE_MASK_NBITS, .eth_type = PORT_NONE, }, [PRESTERA_PORT_TYPE_TP] = { .eth_mode = ETHTOOL_LINK_MODE_TP_BIT, .eth_type = PORT_TP, }, [PRESTERA_PORT_TYPE_AUI] = { .eth_mode = ETHTOOL_LINK_MODE_AUI_BIT, .eth_type = PORT_AUI, }, [PRESTERA_PORT_TYPE_MII] = { .eth_mode = ETHTOOL_LINK_MODE_MII_BIT, .eth_type = PORT_MII, }, [PRESTERA_PORT_TYPE_FIBRE] = { .eth_mode = ETHTOOL_LINK_MODE_FIBRE_BIT, .eth_type = PORT_FIBRE, }, [PRESTERA_PORT_TYPE_BNC] = { .eth_mode = ETHTOOL_LINK_MODE_BNC_BIT, .eth_type = PORT_BNC, }, [PRESTERA_PORT_TYPE_DA] = { .eth_mode = ETHTOOL_LINK_MODE_TP_BIT, .eth_type = PORT_TP, }, [PRESTERA_PORT_TYPE_OTHER] = { .eth_mode = __ETHTOOL_LINK_MODE_MASK_NBITS, .eth_type = PORT_OTHER, } }; static const char prestera_cnt_name[PRESTERA_STATS_CNT][ETH_GSTRING_LEN] = { PRESTERA_STATS_FIELD(good_octets_received), PRESTERA_STATS_FIELD(bad_octets_received), PRESTERA_STATS_FIELD(mac_trans_error), PRESTERA_STATS_FIELD(broadcast_frames_received), PRESTERA_STATS_FIELD(multicast_frames_received), PRESTERA_STATS_FIELD(frames_64_octets), PRESTERA_STATS_FIELD(frames_65_to_127_octets), PRESTERA_STATS_FIELD(frames_128_to_255_octets), PRESTERA_STATS_FIELD(frames_256_to_511_octets), PRESTERA_STATS_FIELD(frames_512_to_1023_octets), PRESTERA_STATS_FIELD(frames_1024_to_max_octets), PRESTERA_STATS_FIELD(excessive_collision), PRESTERA_STATS_FIELD(multicast_frames_sent), PRESTERA_STATS_FIELD(broadcast_frames_sent), PRESTERA_STATS_FIELD(fc_sent), PRESTERA_STATS_FIELD(fc_received), PRESTERA_STATS_FIELD(buffer_overrun), PRESTERA_STATS_FIELD(undersize), PRESTERA_STATS_FIELD(fragments), PRESTERA_STATS_FIELD(oversize), PRESTERA_STATS_FIELD(jabber), PRESTERA_STATS_FIELD(rx_error_frame_received), PRESTERA_STATS_FIELD(bad_crc), PRESTERA_STATS_FIELD(collisions), PRESTERA_STATS_FIELD(late_collision), PRESTERA_STATS_FIELD(unicast_frames_received), PRESTERA_STATS_FIELD(unicast_frames_sent), PRESTERA_STATS_FIELD(sent_multiple), PRESTERA_STATS_FIELD(sent_deferred), PRESTERA_STATS_FIELD(good_octets_sent), }; static void prestera_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { struct prestera_port *port = netdev_priv(dev); struct prestera_switch *sw = port->sw; strscpy(drvinfo->driver, driver_kind, sizeof(drvinfo->driver)); strscpy(drvinfo->bus_info, dev_name(prestera_dev(sw)), sizeof(drvinfo->bus_info)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d", sw->dev->fw_rev.maj, sw->dev->fw_rev.min, sw->dev->fw_rev.sub); } static u8 prestera_port_type_get(struct prestera_port *port) { if (port->caps.type < PRESTERA_PORT_TYPE_MAX) return port_types[port->caps.type].eth_type; return PORT_OTHER; } static int prestera_port_type_set(const struct ethtool_link_ksettings *ecmd, struct prestera_port *port) { u32 new_mode = PRESTERA_LINK_MODE_MAX; u32 type, mode; for (type = 0; type < PRESTERA_PORT_TYPE_MAX; type++) { if (port_types[type].eth_type == ecmd->base.port && test_bit(port_types[type].eth_mode, ecmd->link_modes.supported)) { break; } } if (type == port->caps.type) return 0; if (type != port->caps.type && ecmd->base.autoneg == AUTONEG_ENABLE) return -EINVAL; if (type == PRESTERA_PORT_TYPE_MAX) return -EOPNOTSUPP; for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) { if 
((port_link_modes[mode].pr_mask & port->caps.supp_link_modes) && type == port_link_modes[mode].port_type) { new_mode = mode; } } if (new_mode >= PRESTERA_LINK_MODE_MAX) return -EINVAL; port->caps.type = type; port->autoneg = false; return 0; } static void prestera_modes_to_eth(unsigned long *eth_modes, u64 link_modes, u8 fec, u8 type) { u32 mode; for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) { if ((port_link_modes[mode].pr_mask & link_modes) == 0) continue; if (type != PRESTERA_PORT_TYPE_NONE && port_link_modes[mode].port_type != type) continue; __set_bit(port_link_modes[mode].eth_mode, eth_modes); } for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) { if ((port_fec_caps[mode].pr_fec & fec) == 0) continue; __set_bit(port_fec_caps[mode].eth_mode, eth_modes); } } static void prestera_modes_from_eth(const unsigned long *eth_modes, u64 *link_modes, u8 *fec, u8 type) { u64 adver_modes = 0; u32 fec_modes = 0; u32 mode; for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) { if (!test_bit(port_link_modes[mode].eth_mode, eth_modes)) continue; if (port_link_modes[mode].port_type != type) continue; adver_modes |= port_link_modes[mode].pr_mask; } for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) { if (!test_bit(port_fec_caps[mode].eth_mode, eth_modes)) continue; fec_modes |= port_fec_caps[mode].pr_fec; } *link_modes = adver_modes; *fec = fec_modes; } static void prestera_port_supp_types_get(struct ethtool_link_ksettings *ecmd, struct prestera_port *port) { u32 mode; u8 ptype; for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) { if ((port_link_modes[mode].pr_mask & port->caps.supp_link_modes) == 0) continue; ptype = port_link_modes[mode].port_type; __set_bit(port_types[ptype].eth_mode, ecmd->link_modes.supported); } } static void prestera_port_remote_cap_get(struct ethtool_link_ksettings *ecmd, struct prestera_port *port) { struct prestera_port_phy_state *state = &port->state_phy; bool asym_pause; bool pause; u64 bitmap; int err; err = prestera_hw_port_phy_mode_get(port, NULL, &state->lmode_bmap, &state->remote_fc.pause, &state->remote_fc.asym_pause); if (err) netdev_warn(port->dev, "Remote link caps get failed %d", port->caps.transceiver); bitmap = state->lmode_bmap; prestera_modes_to_eth(ecmd->link_modes.lp_advertising, bitmap, 0, PRESTERA_PORT_TYPE_NONE); if (!bitmap_empty(ecmd->link_modes.lp_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS)) { ethtool_link_ksettings_add_link_mode(ecmd, lp_advertising, Autoneg); } pause = state->remote_fc.pause; asym_pause = state->remote_fc.asym_pause; if (pause) ethtool_link_ksettings_add_link_mode(ecmd, lp_advertising, Pause); if (asym_pause) ethtool_link_ksettings_add_link_mode(ecmd, lp_advertising, Asym_Pause); } static void prestera_port_link_mode_get(struct ethtool_link_ksettings *ecmd, struct prestera_port *port) { struct prestera_port_mac_state *state = &port->state_mac; u32 speed; u8 duplex; int err; if (!port->state_mac.oper) return; if (state->speed == SPEED_UNKNOWN || state->duplex == DUPLEX_UNKNOWN) { err = prestera_hw_port_mac_mode_get(port, NULL, &speed, &duplex, NULL); if (err) { state->speed = SPEED_UNKNOWN; state->duplex = DUPLEX_UNKNOWN; } else { state->speed = speed; state->duplex = duplex == PRESTERA_PORT_DUPLEX_FULL ? 
DUPLEX_FULL : DUPLEX_HALF; } } ecmd->base.speed = port->state_mac.speed; ecmd->base.duplex = port->state_mac.duplex; } static void prestera_port_mdix_get(struct ethtool_link_ksettings *ecmd, struct prestera_port *port) { struct prestera_port_phy_state *state = &port->state_phy; if (prestera_hw_port_phy_mode_get(port, &state->mdix, NULL, NULL, NULL)) { netdev_warn(port->dev, "MDIX params get failed"); state->mdix = ETH_TP_MDI_INVALID; } ecmd->base.eth_tp_mdix = port->state_phy.mdix; ecmd->base.eth_tp_mdix_ctrl = port->cfg_phy.mdix; } static int prestera_ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *ecmd) { struct prestera_port *port = netdev_priv(dev); ethtool_link_ksettings_zero_link_mode(ecmd, supported); ethtool_link_ksettings_zero_link_mode(ecmd, advertising); ethtool_link_ksettings_zero_link_mode(ecmd, lp_advertising); ecmd->base.speed = SPEED_UNKNOWN; ecmd->base.duplex = DUPLEX_UNKNOWN; if (port->phy_link) return phylink_ethtool_ksettings_get(port->phy_link, ecmd); ecmd->base.autoneg = port->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; if (port->caps.type == PRESTERA_PORT_TYPE_TP) { ethtool_link_ksettings_add_link_mode(ecmd, supported, Autoneg); if (netif_running(dev) && (port->autoneg || port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER)) ethtool_link_ksettings_add_link_mode(ecmd, advertising, Autoneg); } prestera_modes_to_eth(ecmd->link_modes.supported, port->caps.supp_link_modes, port->caps.supp_fec, port->caps.type); prestera_port_supp_types_get(ecmd, port); if (netif_carrier_ok(dev)) prestera_port_link_mode_get(ecmd, port); ecmd->base.port = prestera_port_type_get(port); if (port->autoneg) { if (netif_running(dev)) prestera_modes_to_eth(ecmd->link_modes.advertising, port->adver_link_modes, port->adver_fec, port->caps.type); if (netif_carrier_ok(dev) && port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER) prestera_port_remote_cap_get(ecmd, port); } if (port->caps.type == PRESTERA_PORT_TYPE_TP && port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER) prestera_port_mdix_get(ecmd, port); return 0; } static int prestera_port_mdix_set(const struct ethtool_link_ksettings *ecmd, struct prestera_port *port) { if (ecmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_INVALID && port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER && port->caps.type == PRESTERA_PORT_TYPE_TP) { port->cfg_phy.mdix = ecmd->base.eth_tp_mdix_ctrl; return prestera_hw_port_phy_mode_set(port, port->cfg_phy.admin, port->autoneg, port->cfg_phy.mode, port->adver_link_modes, port->cfg_phy.mdix); } return 0; } static int prestera_port_link_mode_set(struct prestera_port *port, u32 speed, u8 duplex, u8 type) { u32 new_mode = PRESTERA_LINK_MODE_MAX; u32 mode; int err; for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) { if (speed != SPEED_UNKNOWN && speed != port_link_modes[mode].speed) continue; if (duplex != DUPLEX_UNKNOWN && duplex != port_link_modes[mode].duplex) continue; if (!(port_link_modes[mode].pr_mask & port->caps.supp_link_modes)) continue; if (type != port_link_modes[mode].port_type) continue; new_mode = mode; break; } if (new_mode == PRESTERA_LINK_MODE_MAX) return -EOPNOTSUPP; err = prestera_hw_port_phy_mode_set(port, port->cfg_phy.admin, false, new_mode, 0, port->cfg_phy.mdix); if (err) return err; port->adver_fec = BIT(PRESTERA_PORT_FEC_OFF); port->adver_link_modes = 0; port->cfg_phy.mode = new_mode; port->autoneg = false; return 0; } static int prestera_port_speed_duplex_set(const struct ethtool_link_ksettings *ecmd, struct prestera_port *port) { u8 duplex = DUPLEX_UNKNOWN; if 
(ecmd->base.duplex != DUPLEX_UNKNOWN) duplex = ecmd->base.duplex == DUPLEX_FULL ? PRESTERA_PORT_DUPLEX_FULL : PRESTERA_PORT_DUPLEX_HALF; return prestera_port_link_mode_set(port, ecmd->base.speed, duplex, port->caps.type); } static int prestera_ethtool_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *ecmd) { struct prestera_port *port = netdev_priv(dev); u64 adver_modes; u8 adver_fec; int err; if (port->phy_link) return phylink_ethtool_ksettings_set(port->phy_link, ecmd); err = prestera_port_type_set(ecmd, port); if (err) return err; if (port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER) { err = prestera_port_mdix_set(ecmd, port); if (err) return err; } prestera_modes_from_eth(ecmd->link_modes.advertising, &adver_modes, &adver_fec, port->caps.type); if (ecmd->base.autoneg == AUTONEG_ENABLE) err = prestera_port_autoneg_set(port, adver_modes); else err = prestera_port_speed_duplex_set(ecmd, port); return err; } static int prestera_ethtool_get_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam) { struct prestera_port *port = netdev_priv(dev); u8 active; u32 mode; int err; err = prestera_hw_port_mac_mode_get(port, NULL, NULL, NULL, &active); if (err) return err; fecparam->fec = 0; for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) { if ((port_fec_caps[mode].pr_fec & port->caps.supp_fec) == 0) continue; fecparam->fec |= port_fec_caps[mode].eth_fec; } if (active < PRESTERA_PORT_FEC_MAX) fecparam->active_fec = port_fec_caps[active].eth_fec; else fecparam->active_fec = ETHTOOL_FEC_AUTO; return 0; } static int prestera_ethtool_set_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam) { struct prestera_port *port = netdev_priv(dev); struct prestera_port_mac_config cfg_mac; u32 mode; u8 fec; if (port->autoneg) { netdev_err(dev, "FEC set is not allowed while autoneg is on\n"); return -EINVAL; } if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) { netdev_err(dev, "FEC set is not allowed on non-SFP ports\n"); return -EINVAL; } fec = PRESTERA_PORT_FEC_MAX; for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) { if ((port_fec_caps[mode].eth_fec & fecparam->fec) && (port_fec_caps[mode].pr_fec & port->caps.supp_fec)) { fec = mode; break; } } prestera_port_cfg_mac_read(port, &cfg_mac); if (fec == cfg_mac.fec) return 0; if (fec == PRESTERA_PORT_FEC_MAX) { netdev_err(dev, "Unsupported FEC requested"); return -EINVAL; } cfg_mac.fec = fec; return prestera_port_cfg_mac_write(port, &cfg_mac); } static int prestera_ethtool_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return PRESTERA_STATS_CNT; default: return -EOPNOTSUPP; } } static void prestera_ethtool_get_strings(struct net_device *dev, u32 stringset, u8 *data) { if (stringset != ETH_SS_STATS) return; memcpy(data, prestera_cnt_name, sizeof(prestera_cnt_name)); } static void prestera_ethtool_get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct prestera_port *port = netdev_priv(dev); struct prestera_port_stats *port_stats; port_stats = &port->cached_hw_stats.stats; memcpy(data, port_stats, sizeof(*port_stats)); } static int prestera_ethtool_nway_reset(struct net_device *dev) { struct prestera_port *port = netdev_priv(dev); if (netif_running(dev) && port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER && port->caps.type == PRESTERA_PORT_TYPE_TP) return prestera_hw_port_autoneg_restart(port); return -EINVAL; } const struct ethtool_ops prestera_ethtool_ops = { .get_drvinfo = prestera_ethtool_get_drvinfo, .get_link_ksettings = 
prestera_ethtool_get_link_ksettings, .set_link_ksettings = prestera_ethtool_set_link_ksettings, .get_fecparam = prestera_ethtool_get_fecparam, .set_fecparam = prestera_ethtool_set_fecparam, .get_sset_count = prestera_ethtool_get_sset_count, .get_strings = prestera_ethtool_get_strings, .get_ethtool_stats = prestera_ethtool_get_stats, .get_link = ethtool_op_get_link, .nway_reset = prestera_ethtool_nway_reset };
linux-master
drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
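The ethtool code above resolves a requested speed/duplex/port-type combination by scanning a fixed link-mode table for the first matching row. A standalone C sketch of that table lookup (hypothetical, much-reduced table; not driver code):

#include <stdio.h>

enum { DUP_HALF, DUP_FULL };
enum { TYPE_TP, TYPE_FIBRE };

/* Hypothetical, reduced analogue of a link-mode capability table. */
static const struct link_mode {
	const char *name;
	unsigned int speed;
	int duplex;
	int port_type;
} modes[] = {
	{ "1000baseT_Full",  1000,  DUP_FULL, TYPE_TP },
	{ "10GbaseSR_Full", 10000,  DUP_FULL, TYPE_FIBRE },
	{ "25GbaseSR_Full", 25000,  DUP_FULL, TYPE_FIBRE },
};

/* Return the first mode matching the request, or NULL if unsupported. */
static const struct link_mode *find_mode(unsigned int speed, int duplex, int type)
{
	for (size_t i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
		if (modes[i].speed == speed &&
		    modes[i].duplex == duplex &&
		    modes[i].port_type == type)
			return &modes[i];
	}
	return NULL;
}

int main(void)
{
	const struct link_mode *m = find_mode(10000, DUP_FULL, TYPE_FIBRE);

	printf("%s\n", m ? m->name : "unsupported");
	return 0;
}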
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */ #include <linux/etherdevice.h> #include <linux/jiffies.h> #include <linux/list.h> #include <linux/module.h> #include <linux/netdev_features.h> #include <linux/of.h> #include <linux/of_net.h> #include <linux/if_vlan.h> #include <linux/phylink.h> #include "prestera.h" #include "prestera_hw.h" #include "prestera_acl.h" #include "prestera_flow.h" #include "prestera_span.h" #include "prestera_rxtx.h" #include "prestera_devlink.h" #include "prestera_ethtool.h" #include "prestera_counter.h" #include "prestera_switchdev.h" #define PRESTERA_MTU_DEFAULT 1536 #define PRESTERA_STATS_DELAY_MS 1000 #define PRESTERA_MAC_ADDR_NUM_MAX 255 static struct workqueue_struct *prestera_wq; static struct workqueue_struct *prestera_owq; void prestera_queue_work(struct work_struct *work) { queue_work(prestera_owq, work); } void prestera_queue_delayed_work(struct delayed_work *work, unsigned long delay) { queue_delayed_work(prestera_wq, work, delay); } void prestera_queue_drain(void) { drain_workqueue(prestera_wq); drain_workqueue(prestera_owq); } int prestera_port_learning_set(struct prestera_port *port, bool learn) { return prestera_hw_port_learning_set(port, learn); } int prestera_port_uc_flood_set(struct prestera_port *port, bool flood) { return prestera_hw_port_uc_flood_set(port, flood); } int prestera_port_mc_flood_set(struct prestera_port *port, bool flood) { return prestera_hw_port_mc_flood_set(port, flood); } int prestera_port_br_locked_set(struct prestera_port *port, bool br_locked) { return prestera_hw_port_br_locked_set(port, br_locked); } int prestera_port_pvid_set(struct prestera_port *port, u16 vid) { enum prestera_accept_frm_type frm_type; int err; frm_type = PRESTERA_ACCEPT_FRAME_TYPE_TAGGED; if (vid) { err = prestera_hw_vlan_port_vid_set(port, vid); if (err) return err; frm_type = PRESTERA_ACCEPT_FRAME_TYPE_ALL; } err = prestera_hw_port_accept_frm_type(port, frm_type); if (err && frm_type == PRESTERA_ACCEPT_FRAME_TYPE_ALL) prestera_hw_vlan_port_vid_set(port, port->pvid); port->pvid = vid; return 0; } struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw, u32 dev_id, u32 hw_id) { struct prestera_port *port = NULL, *tmp; read_lock(&sw->port_list_lock); list_for_each_entry(tmp, &sw->port_list, list) { if (tmp->dev_id == dev_id && tmp->hw_id == hw_id) { port = tmp; break; } } read_unlock(&sw->port_list_lock); return port; } struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id) { struct prestera_port *port = NULL, *tmp; read_lock(&sw->port_list_lock); list_for_each_entry(tmp, &sw->port_list, list) { if (tmp->id == id) { port = tmp; break; } } read_unlock(&sw->port_list_lock); return port; } struct prestera_switch *prestera_switch_get(struct net_device *dev) { struct prestera_port *port; port = prestera_port_dev_lower_find(dev); return port ? 
port->sw : NULL; } int prestera_port_cfg_mac_read(struct prestera_port *port, struct prestera_port_mac_config *cfg) { *cfg = port->cfg_mac; return 0; } int prestera_port_cfg_mac_write(struct prestera_port *port, struct prestera_port_mac_config *cfg) { int err; err = prestera_hw_port_mac_mode_set(port, cfg->admin, cfg->mode, cfg->inband, cfg->speed, cfg->duplex, cfg->fec); if (err) return err; port->cfg_mac = *cfg; return 0; } static int prestera_port_open(struct net_device *dev) { struct prestera_port *port = netdev_priv(dev); struct prestera_port_mac_config cfg_mac; int err = 0; if (port->phy_link) { phylink_start(port->phy_link); } else { if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) { err = prestera_port_cfg_mac_read(port, &cfg_mac); if (!err) { cfg_mac.admin = true; err = prestera_port_cfg_mac_write(port, &cfg_mac); } } else { port->cfg_phy.admin = true; err = prestera_hw_port_phy_mode_set(port, true, port->autoneg, port->cfg_phy.mode, port->adver_link_modes, port->cfg_phy.mdix); } } netif_start_queue(dev); return err; } static int prestera_port_close(struct net_device *dev) { struct prestera_port *port = netdev_priv(dev); struct prestera_port_mac_config cfg_mac; int err = 0; netif_stop_queue(dev); if (port->phy_link) { phylink_stop(port->phy_link); phylink_disconnect_phy(port->phy_link); err = prestera_port_cfg_mac_read(port, &cfg_mac); if (!err) { cfg_mac.admin = false; prestera_port_cfg_mac_write(port, &cfg_mac); } } else { if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) { err = prestera_port_cfg_mac_read(port, &cfg_mac); if (!err) { cfg_mac.admin = false; prestera_port_cfg_mac_write(port, &cfg_mac); } } else { port->cfg_phy.admin = false; err = prestera_hw_port_phy_mode_set(port, false, port->autoneg, port->cfg_phy.mode, port->adver_link_modes, port->cfg_phy.mdix); } } return err; } static void prestera_port_mac_state_cache_read(struct prestera_port *port, struct prestera_port_mac_state *state) { spin_lock(&port->state_mac_lock); *state = port->state_mac; spin_unlock(&port->state_mac_lock); } static void prestera_port_mac_state_cache_write(struct prestera_port *port, struct prestera_port_mac_state *state) { spin_lock(&port->state_mac_lock); port->state_mac = *state; spin_unlock(&port->state_mac_lock); } static struct prestera_port *prestera_pcs_to_port(struct phylink_pcs *pcs) { return container_of(pcs, struct prestera_port, phylink_pcs); } static void prestera_mac_config(struct phylink_config *config, unsigned int an_mode, const struct phylink_link_state *state) { } static void prestera_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { struct net_device *ndev = to_net_dev(config->dev); struct prestera_port *port = netdev_priv(ndev); struct prestera_port_mac_state state_mac; /* Invalidate. Parameters will update on next link event. 
*/ memset(&state_mac, 0, sizeof(state_mac)); state_mac.valid = false; prestera_port_mac_state_cache_write(port, &state_mac); } static void prestera_mac_link_up(struct phylink_config *config, struct phy_device *phy, unsigned int mode, phy_interface_t interface, int speed, int duplex, bool tx_pause, bool rx_pause) { } static struct phylink_pcs * prestera_mac_select_pcs(struct phylink_config *config, phy_interface_t interface) { struct net_device *dev = to_net_dev(config->dev); struct prestera_port *port = netdev_priv(dev); return &port->phylink_pcs; } static void prestera_pcs_get_state(struct phylink_pcs *pcs, struct phylink_link_state *state) { struct prestera_port *port = container_of(pcs, struct prestera_port, phylink_pcs); struct prestera_port_mac_state smac; prestera_port_mac_state_cache_read(port, &smac); if (smac.valid) { state->link = smac.oper ? 1 : 0; /* AN is completed, when port is up */ state->an_complete = (smac.oper && port->autoneg) ? 1 : 0; state->speed = smac.speed; state->duplex = smac.duplex; } else { state->link = 0; state->an_complete = 0; } } static int prestera_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, const unsigned long *advertising, bool permit_pause_to_mac) { struct prestera_port *port = prestera_pcs_to_port(pcs); struct prestera_port_mac_config cfg_mac; int err; err = prestera_port_cfg_mac_read(port, &cfg_mac); if (err) return err; cfg_mac.admin = true; cfg_mac.fec = PRESTERA_PORT_FEC_OFF; cfg_mac.inband = neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED; switch (interface) { case PHY_INTERFACE_MODE_10GBASER: cfg_mac.speed = SPEED_10000; cfg_mac.mode = PRESTERA_MAC_MODE_SR_LR; break; case PHY_INTERFACE_MODE_2500BASEX: cfg_mac.speed = SPEED_2500; cfg_mac.duplex = DUPLEX_FULL; cfg_mac.mode = PRESTERA_MAC_MODE_SGMII; break; case PHY_INTERFACE_MODE_SGMII: cfg_mac.mode = PRESTERA_MAC_MODE_SGMII; break; case PHY_INTERFACE_MODE_1000BASEX: default: cfg_mac.speed = SPEED_1000; cfg_mac.duplex = DUPLEX_FULL; cfg_mac.mode = PRESTERA_MAC_MODE_1000BASE_X; break; } err = prestera_port_cfg_mac_write(port, &cfg_mac); if (err) return err; return 0; } static void prestera_pcs_an_restart(struct phylink_pcs *pcs) { /* TODO: add 1000basex AN restart support * (Currently FW has no support for 1000baseX AN restart, but it will in the future, * so as for now the function would stay empty.) 
*/ } static const struct phylink_mac_ops prestera_mac_ops = { .mac_select_pcs = prestera_mac_select_pcs, .mac_config = prestera_mac_config, .mac_link_down = prestera_mac_link_down, .mac_link_up = prestera_mac_link_up, }; static const struct phylink_pcs_ops prestera_pcs_ops = { .pcs_get_state = prestera_pcs_get_state, .pcs_config = prestera_pcs_config, .pcs_an_restart = prestera_pcs_an_restart, }; static int prestera_port_sfp_bind(struct prestera_port *port) { struct prestera_switch *sw = port->sw; struct device_node *ports, *node; struct fwnode_handle *fwnode; struct phylink *phy_link; int err; if (!sw->np) return 0; of_node_get(sw->np); ports = of_find_node_by_name(sw->np, "ports"); for_each_child_of_node(ports, node) { int num; err = of_property_read_u32(node, "prestera,port-num", &num); if (err) { dev_err(sw->dev->dev, "device node %pOF has no valid reg property: %d\n", node, err); goto out; } if (port->fp_id != num) continue; port->phylink_pcs.ops = &prestera_pcs_ops; port->phylink_pcs.neg_mode = true; port->phy_config.dev = &port->dev->dev; port->phy_config.type = PHYLINK_NETDEV; fwnode = of_fwnode_handle(node); __set_bit(PHY_INTERFACE_MODE_10GBASER, port->phy_config.supported_interfaces); __set_bit(PHY_INTERFACE_MODE_2500BASEX, port->phy_config.supported_interfaces); __set_bit(PHY_INTERFACE_MODE_SGMII, port->phy_config.supported_interfaces); __set_bit(PHY_INTERFACE_MODE_1000BASEX, port->phy_config.supported_interfaces); port->phy_config.mac_capabilities = MAC_1000 | MAC_2500FD | MAC_10000FD; phy_link = phylink_create(&port->phy_config, fwnode, PHY_INTERFACE_MODE_INTERNAL, &prestera_mac_ops); if (IS_ERR(phy_link)) { netdev_err(port->dev, "failed to create phylink\n"); err = PTR_ERR(phy_link); goto out; } port->phy_link = phy_link; break; } out: of_node_put(node); of_node_put(ports); return err; } static int prestera_port_sfp_unbind(struct prestera_port *port) { if (port->phy_link) phylink_destroy(port->phy_link); return 0; } static netdev_tx_t prestera_port_xmit(struct sk_buff *skb, struct net_device *dev) { return prestera_rxtx_xmit(netdev_priv(dev), skb); } int prestera_is_valid_mac_addr(struct prestera_port *port, const u8 *addr) { if (!is_valid_ether_addr(addr)) return -EADDRNOTAVAIL; /* firmware requires that port's MAC address contains first 5 bytes * of the base MAC address */ if (memcmp(port->sw->base_mac, addr, ETH_ALEN - 1)) return -EINVAL; return 0; } static int prestera_port_set_mac_address(struct net_device *dev, void *p) { struct prestera_port *port = netdev_priv(dev); struct sockaddr *addr = p; int err; err = prestera_is_valid_mac_addr(port, addr->sa_data); if (err) return err; err = prestera_hw_port_mac_set(port, addr->sa_data); if (err) return err; eth_hw_addr_set(dev, addr->sa_data); return 0; } static int prestera_port_change_mtu(struct net_device *dev, int mtu) { struct prestera_port *port = netdev_priv(dev); int err; err = prestera_hw_port_mtu_set(port, mtu); if (err) return err; dev->mtu = mtu; return 0; } static void prestera_port_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct prestera_port *port = netdev_priv(dev); struct prestera_port_stats *port_stats = &port->cached_hw_stats.stats; stats->rx_packets = port_stats->broadcast_frames_received + port_stats->multicast_frames_received + port_stats->unicast_frames_received; stats->tx_packets = port_stats->broadcast_frames_sent + port_stats->multicast_frames_sent + port_stats->unicast_frames_sent; stats->rx_bytes = port_stats->good_octets_received; stats->tx_bytes = 
port_stats->good_octets_sent; stats->rx_errors = port_stats->rx_error_frame_received; stats->tx_errors = port_stats->mac_trans_error; stats->rx_dropped = port_stats->buffer_overrun; stats->tx_dropped = 0; stats->multicast = port_stats->multicast_frames_received; stats->collisions = port_stats->excessive_collision; stats->rx_crc_errors = port_stats->bad_crc; } static void prestera_port_get_hw_stats(struct prestera_port *port) { prestera_hw_port_stats_get(port, &port->cached_hw_stats.stats); } static void prestera_port_stats_update(struct work_struct *work) { struct prestera_port *port = container_of(work, struct prestera_port, cached_hw_stats.caching_dw.work); prestera_port_get_hw_stats(port); queue_delayed_work(prestera_wq, &port->cached_hw_stats.caching_dw, msecs_to_jiffies(PRESTERA_STATS_DELAY_MS)); } static int prestera_port_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { struct prestera_port *port = netdev_priv(dev); switch (type) { case TC_SETUP_BLOCK: return prestera_flow_block_setup(port, type_data); default: return -EOPNOTSUPP; } } static const struct net_device_ops prestera_netdev_ops = { .ndo_open = prestera_port_open, .ndo_stop = prestera_port_close, .ndo_start_xmit = prestera_port_xmit, .ndo_setup_tc = prestera_port_setup_tc, .ndo_change_mtu = prestera_port_change_mtu, .ndo_get_stats64 = prestera_port_get_stats64, .ndo_set_mac_address = prestera_port_set_mac_address, }; int prestera_port_autoneg_set(struct prestera_port *port, u64 link_modes) { int err; if (port->autoneg && port->adver_link_modes == link_modes) return 0; err = prestera_hw_port_phy_mode_set(port, port->cfg_phy.admin, true, 0, link_modes, port->cfg_phy.mdix); if (err) return err; port->adver_fec = BIT(PRESTERA_PORT_FEC_OFF); port->adver_link_modes = link_modes; port->cfg_phy.mode = 0; port->autoneg = true; return 0; } static void prestera_port_list_add(struct prestera_port *port) { write_lock(&port->sw->port_list_lock); list_add(&port->list, &port->sw->port_list); write_unlock(&port->sw->port_list_lock); } static void prestera_port_list_del(struct prestera_port *port) { write_lock(&port->sw->port_list_lock); list_del(&port->list); write_unlock(&port->sw->port_list_lock); } static int prestera_port_create(struct prestera_switch *sw, u32 id) { struct prestera_port_mac_config cfg_mac; struct prestera_port *port; struct net_device *dev; int err; dev = alloc_etherdev(sizeof(*port)); if (!dev) return -ENOMEM; port = netdev_priv(dev); INIT_LIST_HEAD(&port->vlans_list); port->pvid = PRESTERA_DEFAULT_VID; port->lag = NULL; port->dev = dev; port->id = id; port->sw = sw; spin_lock_init(&port->state_mac_lock); err = prestera_hw_port_info_get(port, &port->dev_id, &port->hw_id, &port->fp_id); if (err) { dev_err(prestera_dev(sw), "Failed to get port(%u) info\n", id); goto err_port_info_get; } err = prestera_devlink_port_register(port); if (err) goto err_dl_port_register; dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC; dev->netdev_ops = &prestera_netdev_ops; dev->ethtool_ops = &prestera_ethtool_ops; SET_NETDEV_DEV(dev, sw->dev->dev); SET_NETDEV_DEVLINK_PORT(dev, &port->dl_port); if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP) netif_carrier_off(dev); dev->mtu = min_t(unsigned int, sw->mtu_max, PRESTERA_MTU_DEFAULT); dev->min_mtu = sw->mtu_min; dev->max_mtu = sw->mtu_max; err = prestera_hw_port_mtu_set(port, dev->mtu); if (err) { dev_err(prestera_dev(sw), "Failed to set port(%u) mtu(%d)\n", id, dev->mtu); goto err_port_init; } if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX) { err = -EINVAL; 
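	/* Note: fp_id doubles as the offset that eth_hw_addr_gen() below adds
	 * to the switch base MAC, so it must stay below
	 * PRESTERA_MAC_ADDR_NUM_MAX (255) for the derived address to remain
	 * inside the per-switch window (see also the wrap warning below).
	 */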
goto err_port_init; } eth_hw_addr_gen(dev, sw->base_mac, port->fp_id); /* firmware requires that port's MAC address consist of the first * 5 bytes of the base MAC address */ if (memcmp(dev->dev_addr, sw->base_mac, ETH_ALEN - 1)) { dev_warn(prestera_dev(sw), "Port MAC address wraps for port(%u)\n", id); dev_addr_mod(dev, 0, sw->base_mac, ETH_ALEN - 1); } err = prestera_hw_port_mac_set(port, dev->dev_addr); if (err) { dev_err(prestera_dev(sw), "Failed to set port(%u) mac addr\n", id); goto err_port_init; } err = prestera_hw_port_cap_get(port, &port->caps); if (err) { dev_err(prestera_dev(sw), "Failed to get port(%u) caps\n", id); goto err_port_init; } port->adver_link_modes = port->caps.supp_link_modes; port->adver_fec = 0; port->autoneg = true; /* initialize config mac */ if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP) { cfg_mac.admin = true; cfg_mac.mode = PRESTERA_MAC_MODE_INTERNAL; } else { cfg_mac.admin = false; cfg_mac.mode = PRESTERA_MAC_MODE_MAX; } cfg_mac.inband = 0; cfg_mac.speed = 0; cfg_mac.duplex = DUPLEX_UNKNOWN; cfg_mac.fec = PRESTERA_PORT_FEC_OFF; err = prestera_port_cfg_mac_write(port, &cfg_mac); if (err) { dev_err(prestera_dev(sw), "Failed to set port(%u) mac mode\n", id); goto err_port_init; } /* initialize config phy (if this is inegral) */ if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP) { port->cfg_phy.mdix = ETH_TP_MDI_AUTO; port->cfg_phy.admin = false; err = prestera_hw_port_phy_mode_set(port, port->cfg_phy.admin, false, 0, 0, port->cfg_phy.mdix); if (err) { dev_err(prestera_dev(sw), "Failed to set port(%u) phy mode\n", id); goto err_port_init; } } err = prestera_rxtx_port_init(port); if (err) goto err_port_init; INIT_DELAYED_WORK(&port->cached_hw_stats.caching_dw, &prestera_port_stats_update); prestera_port_list_add(port); err = register_netdev(dev); if (err) goto err_register_netdev; err = prestera_port_sfp_bind(port); if (err) goto err_sfp_bind; return 0; err_sfp_bind: unregister_netdev(dev); err_register_netdev: prestera_port_list_del(port); err_port_init: prestera_devlink_port_unregister(port); err_dl_port_register: err_port_info_get: free_netdev(dev); return err; } static void prestera_port_destroy(struct prestera_port *port) { struct net_device *dev = port->dev; cancel_delayed_work_sync(&port->cached_hw_stats.caching_dw); unregister_netdev(dev); prestera_port_list_del(port); prestera_devlink_port_unregister(port); free_netdev(dev); } static void prestera_destroy_ports(struct prestera_switch *sw) { struct prestera_port *port, *tmp; list_for_each_entry_safe(port, tmp, &sw->port_list, list) prestera_port_destroy(port); } static int prestera_create_ports(struct prestera_switch *sw) { struct prestera_port *port, *tmp; u32 port_idx; int err; for (port_idx = 0; port_idx < sw->port_count; port_idx++) { err = prestera_port_create(sw, port_idx); if (err) goto err_port_create; } return 0; err_port_create: list_for_each_entry_safe(port, tmp, &sw->port_list, list) { prestera_port_sfp_unbind(port); prestera_port_destroy(port); } return err; } static void prestera_port_handle_event(struct prestera_switch *sw, struct prestera_event *evt, void *arg) { struct prestera_port_mac_state smac; struct prestera_port_event *pevt; struct delayed_work *caching_dw; struct prestera_port *port; if (evt->id == PRESTERA_PORT_EVENT_MAC_STATE_CHANGED) { pevt = &evt->port_evt; port = prestera_find_port(sw, pevt->port_id); if (!port || !port->dev) return; caching_dw = &port->cached_hw_stats.caching_dw; memset(&smac, 0, sizeof(smac)); smac.valid = true; smac.oper = pevt->data.mac.oper; 
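	/* Speed/duplex/FEC/flow-control are only meaningful while the MAC
	 * reports link up; otherwise they stay zeroed from the memset above.
	 */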
if (smac.oper) { smac.mode = pevt->data.mac.mode; smac.speed = pevt->data.mac.speed; smac.duplex = pevt->data.mac.duplex; smac.fc = pevt->data.mac.fc; smac.fec = pevt->data.mac.fec; } prestera_port_mac_state_cache_write(port, &smac); if (port->state_mac.oper) { if (port->phy_link) phylink_mac_change(port->phy_link, true); else netif_carrier_on(port->dev); if (!delayed_work_pending(caching_dw)) queue_delayed_work(prestera_wq, caching_dw, 0); } else { if (port->phy_link) phylink_mac_change(port->phy_link, false); else if (netif_running(port->dev) && netif_carrier_ok(port->dev)) netif_carrier_off(port->dev); if (delayed_work_pending(caching_dw)) cancel_delayed_work(caching_dw); } } } static int prestera_event_handlers_register(struct prestera_switch *sw) { return prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_PORT, prestera_port_handle_event, NULL); } static void prestera_event_handlers_unregister(struct prestera_switch *sw) { prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_PORT, prestera_port_handle_event); } static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw) { int ret; if (sw->np) ret = of_get_mac_address(sw->np, sw->base_mac); if (!is_valid_ether_addr(sw->base_mac) || ret) { eth_random_addr(sw->base_mac); dev_info(prestera_dev(sw), "using random base mac address\n"); } return prestera_hw_switch_mac_set(sw, sw->base_mac); } struct prestera_lag *prestera_lag_by_id(struct prestera_switch *sw, u16 id) { return id < sw->lag_max ? &sw->lags[id] : NULL; } static struct prestera_lag *prestera_lag_by_dev(struct prestera_switch *sw, struct net_device *dev) { struct prestera_lag *lag; u16 id; for (id = 0; id < sw->lag_max; id++) { lag = &sw->lags[id]; if (lag->dev == dev) return lag; } return NULL; } int prestera_lag_id(struct prestera_switch *sw, struct net_device *lag_dev, u16 *lag_id) { struct prestera_lag *lag; int free_id = -1; int id; for (id = 0; id < sw->lag_max; id++) { lag = prestera_lag_by_id(sw, id); if (lag->member_count) { if (lag->dev == lag_dev) { *lag_id = id; return 0; } } else if (free_id < 0) { free_id = id; } } if (free_id < 0) return -ENOSPC; *lag_id = free_id; return 0; } static struct prestera_lag *prestera_lag_create(struct prestera_switch *sw, struct net_device *lag_dev) { struct prestera_lag *lag = NULL; u16 id; for (id = 0; id < sw->lag_max; id++) { lag = &sw->lags[id]; if (!lag->dev) break; } if (lag) { INIT_LIST_HEAD(&lag->members); lag->dev = lag_dev; } return lag; } static void prestera_lag_destroy(struct prestera_switch *sw, struct prestera_lag *lag) { WARN_ON(!list_empty(&lag->members)); lag->member_count = 0; lag->dev = NULL; } static int prestera_lag_port_add(struct prestera_port *port, struct net_device *lag_dev) { struct prestera_switch *sw = port->sw; struct prestera_lag *lag; int err; lag = prestera_lag_by_dev(sw, lag_dev); if (!lag) { lag = prestera_lag_create(sw, lag_dev); if (!lag) return -ENOSPC; } if (lag->member_count >= sw->lag_member_max) return -ENOSPC; err = prestera_hw_lag_member_add(port, lag->lag_id); if (err) { if (!lag->member_count) prestera_lag_destroy(sw, lag); return err; } list_add(&port->lag_member, &lag->members); lag->member_count++; port->lag = lag; return 0; } static int prestera_lag_port_del(struct prestera_port *port) { struct prestera_switch *sw = port->sw; struct prestera_lag *lag = port->lag; int err; if (!lag || !lag->member_count) return -EINVAL; err = prestera_hw_lag_member_del(port, lag->lag_id); if (err) return err; list_del(&port->lag_member); lag->member_count--; port->lag = 
NULL; if (netif_is_bridge_port(lag->dev)) { struct net_device *br_dev; br_dev = netdev_master_upper_dev_get(lag->dev); prestera_bridge_port_leave(br_dev, port); } if (!lag->member_count) prestera_lag_destroy(sw, lag); return 0; } bool prestera_port_is_lag_member(const struct prestera_port *port) { return !!port->lag; } u16 prestera_port_lag_id(const struct prestera_port *port) { return port->lag->lag_id; } static int prestera_lag_init(struct prestera_switch *sw) { u16 id; sw->lags = kcalloc(sw->lag_max, sizeof(*sw->lags), GFP_KERNEL); if (!sw->lags) return -ENOMEM; for (id = 0; id < sw->lag_max; id++) sw->lags[id].lag_id = id; return 0; } static void prestera_lag_fini(struct prestera_switch *sw) { u8 idx; for (idx = 0; idx < sw->lag_max; idx++) WARN_ON(sw->lags[idx].member_count); kfree(sw->lags); } bool prestera_netdev_check(const struct net_device *dev) { return dev->netdev_ops == &prestera_netdev_ops; } static int prestera_lower_dev_walk(struct net_device *dev, struct netdev_nested_priv *priv) { struct prestera_port **pport = (struct prestera_port **)priv->data; if (prestera_netdev_check(dev)) { *pport = netdev_priv(dev); return 1; } return 0; } struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev) { struct prestera_port *port = NULL; struct netdev_nested_priv priv = { .data = (void *)&port, }; if (prestera_netdev_check(dev)) return netdev_priv(dev); netdev_walk_all_lower_dev(dev, prestera_lower_dev_walk, &priv); return port; } static int prestera_netdev_port_lower_event(struct net_device *dev, unsigned long event, void *ptr) { struct netdev_notifier_changelowerstate_info *info = ptr; struct netdev_lag_lower_state_info *lower_state_info; struct prestera_port *port = netdev_priv(dev); bool enabled; if (!netif_is_lag_port(dev)) return 0; if (!prestera_port_is_lag_member(port)) return 0; lower_state_info = info->lower_state_info; enabled = lower_state_info->link_up && lower_state_info->tx_enabled; return prestera_hw_lag_member_enable(port, port->lag->lag_id, enabled); } static bool prestera_lag_master_check(struct net_device *lag_dev, struct netdev_lag_upper_info *info, struct netlink_ext_ack *ext_ack) { if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { NL_SET_ERR_MSG_MOD(ext_ack, "Unsupported LAG Tx type"); return false; } return true; } static int prestera_netdev_port_event(struct net_device *lower, struct net_device *dev, unsigned long event, void *ptr) { struct netdev_notifier_info *info = ptr; struct netdev_notifier_changeupper_info *cu_info; struct prestera_port *port = netdev_priv(dev); struct netlink_ext_ack *extack; struct net_device *upper; extack = netdev_notifier_info_to_extack(info); cu_info = container_of(info, struct netdev_notifier_changeupper_info, info); switch (event) { case NETDEV_PRECHANGEUPPER: upper = cu_info->upper_dev; if (!netif_is_bridge_master(upper) && !netif_is_lag_master(upper)) { NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); return -EINVAL; } if (!cu_info->linking) break; if (netdev_has_any_upper_dev(upper)) { NL_SET_ERR_MSG_MOD(extack, "Upper device is already enslaved"); return -EINVAL; } if (netif_is_lag_master(upper) && !prestera_lag_master_check(upper, cu_info->upper_info, extack)) return -EOPNOTSUPP; if (netif_is_lag_master(upper) && vlan_uses_dev(dev)) { NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and port has a VLAN"); return -EINVAL; } if (netif_is_lag_port(dev) && is_vlan_dev(upper) && !netif_is_lag_master(vlan_dev_real_dev(upper))) { NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); return 
-EINVAL; } break; case NETDEV_CHANGEUPPER: upper = cu_info->upper_dev; if (netif_is_bridge_master(upper)) { if (cu_info->linking) return prestera_bridge_port_join(upper, port, extack); else prestera_bridge_port_leave(upper, port); } else if (netif_is_lag_master(upper)) { if (cu_info->linking) return prestera_lag_port_add(port, upper); else prestera_lag_port_del(port); } break; case NETDEV_CHANGELOWERSTATE: return prestera_netdev_port_lower_event(dev, event, ptr); } return 0; } static int prestera_netdevice_lag_event(struct net_device *lag_dev, unsigned long event, void *ptr) { struct net_device *dev; struct list_head *iter; int err; netdev_for_each_lower_dev(lag_dev, dev, iter) { if (prestera_netdev_check(dev)) { err = prestera_netdev_port_event(lag_dev, dev, event, ptr); if (err) return err; } } return 0; } static int prestera_netdev_event_handler(struct notifier_block *nb, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); int err = 0; if (prestera_netdev_check(dev)) err = prestera_netdev_port_event(dev, dev, event, ptr); else if (netif_is_lag_master(dev)) err = prestera_netdevice_lag_event(dev, event, ptr); return notifier_from_errno(err); } struct prestera_mdb_entry * prestera_mdb_entry_create(struct prestera_switch *sw, const unsigned char *addr, u16 vid) { struct prestera_flood_domain *flood_domain; struct prestera_mdb_entry *mdb_entry; mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL); if (!mdb_entry) goto err_mdb_alloc; flood_domain = prestera_flood_domain_create(sw); if (!flood_domain) goto err_flood_domain_create; mdb_entry->sw = sw; mdb_entry->vid = vid; mdb_entry->flood_domain = flood_domain; ether_addr_copy(mdb_entry->addr, addr); if (prestera_hw_mdb_create(mdb_entry)) goto err_mdb_hw_create; return mdb_entry; err_mdb_hw_create: prestera_flood_domain_destroy(flood_domain); err_flood_domain_create: kfree(mdb_entry); err_mdb_alloc: return NULL; } void prestera_mdb_entry_destroy(struct prestera_mdb_entry *mdb_entry) { prestera_hw_mdb_destroy(mdb_entry); prestera_flood_domain_destroy(mdb_entry->flood_domain); kfree(mdb_entry); } struct prestera_flood_domain * prestera_flood_domain_create(struct prestera_switch *sw) { struct prestera_flood_domain *domain; domain = kzalloc(sizeof(*domain), GFP_KERNEL); if (!domain) return NULL; domain->sw = sw; if (prestera_hw_flood_domain_create(domain)) { kfree(domain); return NULL; } INIT_LIST_HEAD(&domain->flood_domain_port_list); return domain; } void prestera_flood_domain_destroy(struct prestera_flood_domain *flood_domain) { WARN_ON(!list_empty(&flood_domain->flood_domain_port_list)); WARN_ON_ONCE(prestera_hw_flood_domain_destroy(flood_domain)); kfree(flood_domain); } int prestera_flood_domain_port_create(struct prestera_flood_domain *flood_domain, struct net_device *dev, u16 vid) { struct prestera_flood_domain_port *flood_domain_port; bool is_first_port_in_list = false; int err; flood_domain_port = kzalloc(sizeof(*flood_domain_port), GFP_KERNEL); if (!flood_domain_port) { err = -ENOMEM; goto err_port_alloc; } flood_domain_port->vid = vid; if (list_empty(&flood_domain->flood_domain_port_list)) is_first_port_in_list = true; list_add(&flood_domain_port->flood_domain_port_node, &flood_domain->flood_domain_port_list); flood_domain_port->flood_domain = flood_domain; flood_domain_port->dev = dev; if (!is_first_port_in_list) { err = prestera_hw_flood_domain_ports_reset(flood_domain); if (err) goto err_prestera_mdb_port_create_hw; } err = prestera_hw_flood_domain_ports_set(flood_domain); if (err) goto 
err_prestera_mdb_port_create_hw; return 0; err_prestera_mdb_port_create_hw: list_del(&flood_domain_port->flood_domain_port_node); kfree(flood_domain_port); err_port_alloc: return err; } void prestera_flood_domain_port_destroy(struct prestera_flood_domain_port *port) { struct prestera_flood_domain *flood_domain = port->flood_domain; list_del(&port->flood_domain_port_node); WARN_ON_ONCE(prestera_hw_flood_domain_ports_reset(flood_domain)); if (!list_empty(&flood_domain->flood_domain_port_list)) WARN_ON_ONCE(prestera_hw_flood_domain_ports_set(flood_domain)); kfree(port); } struct prestera_flood_domain_port * prestera_flood_domain_port_find(struct prestera_flood_domain *flood_domain, struct net_device *dev, u16 vid) { struct prestera_flood_domain_port *flood_domain_port; list_for_each_entry(flood_domain_port, &flood_domain->flood_domain_port_list, flood_domain_port_node) if (flood_domain_port->dev == dev && vid == flood_domain_port->vid) return flood_domain_port; return NULL; } static int prestera_netdev_event_handler_register(struct prestera_switch *sw) { sw->netdev_nb.notifier_call = prestera_netdev_event_handler; return register_netdevice_notifier(&sw->netdev_nb); } static void prestera_netdev_event_handler_unregister(struct prestera_switch *sw) { unregister_netdevice_notifier(&sw->netdev_nb); } static int prestera_switch_init(struct prestera_switch *sw) { int err; sw->np = sw->dev->dev->of_node; err = prestera_hw_switch_init(sw); if (err) { dev_err(prestera_dev(sw), "Failed to init Switch device\n"); return err; } rwlock_init(&sw->port_list_lock); INIT_LIST_HEAD(&sw->port_list); err = prestera_switch_set_base_mac_addr(sw); if (err) return err; err = prestera_netdev_event_handler_register(sw); if (err) return err; err = prestera_router_init(sw); if (err) goto err_router_init; err = prestera_switchdev_init(sw); if (err) goto err_swdev_register; err = prestera_rxtx_switch_init(sw); if (err) goto err_rxtx_register; err = prestera_event_handlers_register(sw); if (err) goto err_handlers_register; err = prestera_counter_init(sw); if (err) goto err_counter_init; err = prestera_acl_init(sw); if (err) goto err_acl_init; err = prestera_span_init(sw); if (err) goto err_span_init; err = prestera_devlink_traps_register(sw); if (err) goto err_dl_register; err = prestera_lag_init(sw); if (err) goto err_lag_init; err = prestera_create_ports(sw); if (err) goto err_ports_create; prestera_devlink_register(sw); return 0; err_ports_create: prestera_lag_fini(sw); err_lag_init: prestera_devlink_traps_unregister(sw); err_dl_register: prestera_span_fini(sw); err_span_init: prestera_acl_fini(sw); err_acl_init: prestera_counter_fini(sw); err_counter_init: prestera_event_handlers_unregister(sw); err_handlers_register: prestera_rxtx_switch_fini(sw); err_rxtx_register: prestera_switchdev_fini(sw); err_swdev_register: prestera_router_fini(sw); err_router_init: prestera_netdev_event_handler_unregister(sw); prestera_hw_switch_fini(sw); return err; } static void prestera_switch_fini(struct prestera_switch *sw) { prestera_devlink_unregister(sw); prestera_destroy_ports(sw); prestera_lag_fini(sw); prestera_devlink_traps_unregister(sw); prestera_span_fini(sw); prestera_acl_fini(sw); prestera_counter_fini(sw); prestera_event_handlers_unregister(sw); prestera_rxtx_switch_fini(sw); prestera_switchdev_fini(sw); prestera_router_fini(sw); prestera_netdev_event_handler_unregister(sw); prestera_hw_switch_fini(sw); of_node_put(sw->np); } int prestera_device_register(struct prestera_device *dev) { struct prestera_switch *sw; int err; sw = 
prestera_devlink_alloc(dev); if (!sw) return -ENOMEM; dev->priv = sw; sw->dev = dev; err = prestera_switch_init(sw); if (err) { prestera_devlink_free(sw); return err; } return 0; } EXPORT_SYMBOL(prestera_device_register); void prestera_device_unregister(struct prestera_device *dev) { struct prestera_switch *sw = dev->priv; prestera_switch_fini(sw); prestera_devlink_free(sw); } EXPORT_SYMBOL(prestera_device_unregister); static int __init prestera_module_init(void) { prestera_wq = alloc_workqueue("prestera", 0, 0); if (!prestera_wq) return -ENOMEM; prestera_owq = alloc_ordered_workqueue("prestera_ordered", 0); if (!prestera_owq) { destroy_workqueue(prestera_wq); return -ENOMEM; } return 0; } static void __exit prestera_module_exit(void) { destroy_workqueue(prestera_wq); destroy_workqueue(prestera_owq); } module_init(prestera_module_init); module_exit(prestera_module_exit); MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("Marvell Prestera switch driver");
linux-master
drivers/net/ethernet/marvell/prestera/prestera_main.c
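prestera_main.c above exports prestera_device_register() and prestera_device_unregister() as the entry points a bus/transport driver calls after filling in a struct prestera_device. Below is a minimal, hypothetical sketch of such a caller, not the driver's actual transport glue: the send_req() prototype is inferred from its call site in the HW layer (dev->send_req(dev, 0, cmd, clen, ret, rlen, waitms)) and may differ from prestera.h, and every example_* name plus the use of a platform_device is an assumption made only for illustration.

#include <linux/errno.h>
#include <linux/platform_device.h>

#include "prestera.h"

/* Hypothetical firmware-mailbox hook; prototype inferred from the call site. */
static int example_send_req(struct prestera_device *dev, int qid,
			    void *in_msg, size_t in_size,
			    void *out_msg, size_t out_size,
			    unsigned int waitms)
{
	/* A real transport would post in_msg to firmware queue 'qid' and
	 * wait up to 'waitms' ms for the reply to land in out_msg.
	 */
	return -EOPNOTSUPP;	/* stub */
}

static int example_probe(struct platform_device *pdev)
{
	struct prestera_device *dev;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->dev = &pdev->dev;		/* used for devlink/netdev parenting */
	dev->send_req = example_send_req;
	/* dev->recv_msg / dev->recv_pkt are installed by the core in
	 * prestera_hw_switch_init() during prestera_device_register().
	 */
	platform_set_drvdata(pdev, dev);

	return prestera_device_register(dev);
}

static void example_remove(struct platform_device *pdev)
{
	prestera_device_unregister(platform_get_drvdata(pdev));
}

The in-tree transport for this hardware registers over PCI; the platform-device form here is used only to keep the register/unregister pairing visible.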
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */ #include <linux/etherdevice.h> #include <linux/if_bridge.h> #include <linux/ethtool.h> #include <linux/list.h> #include "prestera.h" #include "prestera_hw.h" #include "prestera_acl.h" #include "prestera_counter.h" #include "prestera_router_hw.h" #define PRESTERA_SWITCH_INIT_TIMEOUT_MS (30 * 1000) #define PRESTERA_MIN_MTU 64 #define PRESTERA_MSG_CHUNK_SIZE 1024 enum prestera_cmd_type_t { PRESTERA_CMD_TYPE_SWITCH_INIT = 0x1, PRESTERA_CMD_TYPE_SWITCH_ATTR_SET = 0x2, PRESTERA_CMD_TYPE_PORT_ATTR_SET = 0x100, PRESTERA_CMD_TYPE_PORT_ATTR_GET = 0x101, PRESTERA_CMD_TYPE_PORT_INFO_GET = 0x110, PRESTERA_CMD_TYPE_VLAN_CREATE = 0x200, PRESTERA_CMD_TYPE_VLAN_DELETE = 0x201, PRESTERA_CMD_TYPE_VLAN_PORT_SET = 0x202, PRESTERA_CMD_TYPE_VLAN_PVID_SET = 0x203, PRESTERA_CMD_TYPE_FDB_ADD = 0x300, PRESTERA_CMD_TYPE_FDB_DELETE = 0x301, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT = 0x310, PRESTERA_CMD_TYPE_FDB_FLUSH_VLAN = 0x311, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN = 0x312, PRESTERA_CMD_TYPE_BRIDGE_CREATE = 0x400, PRESTERA_CMD_TYPE_BRIDGE_DELETE = 0x401, PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD = 0x402, PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE = 0x403, PRESTERA_CMD_TYPE_COUNTER_GET = 0x510, PRESTERA_CMD_TYPE_COUNTER_ABORT = 0x511, PRESTERA_CMD_TYPE_COUNTER_TRIGGER = 0x512, PRESTERA_CMD_TYPE_COUNTER_BLOCK_GET = 0x513, PRESTERA_CMD_TYPE_COUNTER_BLOCK_RELEASE = 0x514, PRESTERA_CMD_TYPE_COUNTER_CLEAR = 0x515, PRESTERA_CMD_TYPE_VTCAM_CREATE = 0x540, PRESTERA_CMD_TYPE_VTCAM_DESTROY = 0x541, PRESTERA_CMD_TYPE_VTCAM_RULE_ADD = 0x550, PRESTERA_CMD_TYPE_VTCAM_RULE_DELETE = 0x551, PRESTERA_CMD_TYPE_VTCAM_IFACE_BIND = 0x560, PRESTERA_CMD_TYPE_VTCAM_IFACE_UNBIND = 0x561, PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE = 0x600, PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE = 0x601, PRESTERA_CMD_TYPE_ROUTER_LPM_ADD = 0x610, PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE = 0x611, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_SET = 0x622, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_BLK_GET = 0x645, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_ADD = 0x623, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_DELETE = 0x624, PRESTERA_CMD_TYPE_ROUTER_VR_CREATE = 0x630, PRESTERA_CMD_TYPE_ROUTER_VR_DELETE = 0x631, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_CREATE = 0x700, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_DESTROY = 0x701, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_SET = 0x702, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_RESET = 0x703, PRESTERA_CMD_TYPE_MDB_CREATE = 0x704, PRESTERA_CMD_TYPE_MDB_DESTROY = 0x705, PRESTERA_CMD_TYPE_RXTX_INIT = 0x800, PRESTERA_CMD_TYPE_LAG_MEMBER_ADD = 0x900, PRESTERA_CMD_TYPE_LAG_MEMBER_DELETE = 0x901, PRESTERA_CMD_TYPE_LAG_MEMBER_ENABLE = 0x902, PRESTERA_CMD_TYPE_LAG_MEMBER_DISABLE = 0x903, PRESTERA_CMD_TYPE_STP_PORT_SET = 0x1000, PRESTERA_CMD_TYPE_SPAN_GET = 0x1100, PRESTERA_CMD_TYPE_SPAN_INGRESS_BIND = 0x1101, PRESTERA_CMD_TYPE_SPAN_INGRESS_UNBIND = 0x1102, PRESTERA_CMD_TYPE_SPAN_RELEASE = 0x1103, PRESTERA_CMD_TYPE_SPAN_EGRESS_BIND = 0x1104, PRESTERA_CMD_TYPE_SPAN_EGRESS_UNBIND = 0x1105, PRESTERA_CMD_TYPE_POLICER_CREATE = 0x1500, PRESTERA_CMD_TYPE_POLICER_RELEASE = 0x1501, PRESTERA_CMD_TYPE_POLICER_SET = 0x1502, PRESTERA_CMD_TYPE_CPU_CODE_COUNTERS_GET = 0x2000, PRESTERA_CMD_TYPE_ACK = 0x10000, PRESTERA_CMD_TYPE_MAX }; enum { PRESTERA_CMD_PORT_ATTR_ADMIN_STATE = 1, PRESTERA_CMD_PORT_ATTR_MTU = 3, PRESTERA_CMD_PORT_ATTR_MAC = 4, PRESTERA_CMD_PORT_ATTR_SPEED = 5, PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE = 6, PRESTERA_CMD_PORT_ATTR_LEARNING = 7, PRESTERA_CMD_PORT_ATTR_FLOOD = 8, PRESTERA_CMD_PORT_ATTR_CAPABILITY = 9, 
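	/* The numeric attribute IDs are part of the message protocol with the
	 * firmware; values absent from this list are presumably reserved on
	 * the firmware side or simply unused by this driver (assumption based
	 * on the gaps).
	 */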
PRESTERA_CMD_PORT_ATTR_LOCKED = 10, PRESTERA_CMD_PORT_ATTR_PHY_MODE = 12, PRESTERA_CMD_PORT_ATTR_TYPE = 13, PRESTERA_CMD_PORT_ATTR_STATS = 17, PRESTERA_CMD_PORT_ATTR_MAC_AUTONEG_RESTART = 18, PRESTERA_CMD_PORT_ATTR_PHY_AUTONEG_RESTART = 19, PRESTERA_CMD_PORT_ATTR_MAC_MODE = 22, }; enum { PRESTERA_CMD_SWITCH_ATTR_MAC = 1, PRESTERA_CMD_SWITCH_ATTR_AGEING = 2, }; enum { PRESTERA_CMD_ACK_OK, PRESTERA_CMD_ACK_FAILED, PRESTERA_CMD_ACK_MAX }; enum { PRESTERA_PORT_TP_NA, PRESTERA_PORT_TP_MDI, PRESTERA_PORT_TP_MDIX, PRESTERA_PORT_TP_AUTO, }; enum { PRESTERA_PORT_FLOOD_TYPE_UC = 0, PRESTERA_PORT_FLOOD_TYPE_MC = 1, }; enum { PRESTERA_PORT_GOOD_OCTETS_RCV_CNT, PRESTERA_PORT_BAD_OCTETS_RCV_CNT, PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT, PRESTERA_PORT_BRDC_PKTS_RCV_CNT, PRESTERA_PORT_MC_PKTS_RCV_CNT, PRESTERA_PORT_PKTS_64L_CNT, PRESTERA_PORT_PKTS_65TO127L_CNT, PRESTERA_PORT_PKTS_128TO255L_CNT, PRESTERA_PORT_PKTS_256TO511L_CNT, PRESTERA_PORT_PKTS_512TO1023L_CNT, PRESTERA_PORT_PKTS_1024TOMAXL_CNT, PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT, PRESTERA_PORT_MC_PKTS_SENT_CNT, PRESTERA_PORT_BRDC_PKTS_SENT_CNT, PRESTERA_PORT_FC_SENT_CNT, PRESTERA_PORT_GOOD_FC_RCV_CNT, PRESTERA_PORT_DROP_EVENTS_CNT, PRESTERA_PORT_UNDERSIZE_PKTS_CNT, PRESTERA_PORT_FRAGMENTS_PKTS_CNT, PRESTERA_PORT_OVERSIZE_PKTS_CNT, PRESTERA_PORT_JABBER_PKTS_CNT, PRESTERA_PORT_MAC_RCV_ERROR_CNT, PRESTERA_PORT_BAD_CRC_CNT, PRESTERA_PORT_COLLISIONS_CNT, PRESTERA_PORT_LATE_COLLISIONS_CNT, PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT, PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT, PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT, PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT, PRESTERA_PORT_GOOD_OCTETS_SENT_CNT, PRESTERA_PORT_CNT_MAX }; enum { PRESTERA_FC_NONE, PRESTERA_FC_SYMMETRIC, PRESTERA_FC_ASYMMETRIC, PRESTERA_FC_SYMM_ASYMM, }; enum { PRESTERA_POLICER_MODE_SR_TCM }; enum { PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT = 0, PRESTERA_HW_FDB_ENTRY_TYPE_LAG = 1, PRESTERA_HW_FDB_ENTRY_TYPE_MAX = 2, }; struct prestera_fw_event_handler { struct list_head list; struct rcu_head rcu; enum prestera_event_type type; prestera_event_cb_t func; void *arg; }; enum { PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_REG_PORT = 0, PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_LAG = 1, PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_MAX = 2, }; struct prestera_msg_cmd { __le32 type; }; struct prestera_msg_ret { struct prestera_msg_cmd cmd; __le32 status; }; struct prestera_msg_common_req { struct prestera_msg_cmd cmd; }; struct prestera_msg_common_resp { struct prestera_msg_ret ret; }; struct prestera_msg_switch_attr_req { struct prestera_msg_cmd cmd; __le32 attr; union { __le32 ageing_timeout_ms; struct { u8 mac[ETH_ALEN]; u8 __pad[2]; }; } param; }; struct prestera_msg_switch_init_resp { struct prestera_msg_ret ret; __le32 port_count; __le32 mtu_max; __le32 size_tbl_router_nexthop; u8 switch_id; u8 lag_max; u8 lag_member_max; }; struct prestera_msg_event_port_param { union { struct { __le32 mode; __le32 speed; u8 oper; u8 duplex; u8 fc; u8 fec; } mac; struct { __le64 lmode_bmap; u8 mdix; u8 fc; u8 __pad[2]; } __packed phy; /* make sure always 12 bytes size */ }; }; struct prestera_msg_port_cap_param { __le64 link_mode; u8 type; u8 fec; u8 fc; u8 transceiver; }; struct prestera_msg_port_flood_param { u8 type; u8 enable; u8 __pad[2]; }; union prestera_msg_port_param { __le32 mtu; __le32 speed; __le32 link_mode; u8 admin_state; u8 oper_state; u8 mac[ETH_ALEN]; u8 accept_frm_type; u8 learning; u8 flood; u8 type; u8 duplex; u8 fec; u8 fc; u8 br_locked; union { struct { u8 admin; u8 fc; u8 ap_enable; u8 __reserved[5]; union { struct { __le32 mode; __le32 speed; u8 
inband; u8 duplex; u8 fec; u8 fec_supp; } reg_mode; struct { __le32 mode; __le32 speed; u8 fec; u8 fec_supp; u8 __pad[2]; } ap_modes[PRESTERA_AP_PORT_MAX]; }; } mac; struct { __le64 modes; __le32 mode; u8 admin; u8 adv_enable; u8 mdix; u8 __pad; } phy; } link; struct prestera_msg_port_cap_param cap; struct prestera_msg_port_flood_param flood_ext; struct prestera_msg_event_port_param link_evt; }; struct prestera_msg_port_attr_req { struct prestera_msg_cmd cmd; __le32 attr; __le32 port; __le32 dev; union prestera_msg_port_param param; }; struct prestera_msg_port_attr_resp { struct prestera_msg_ret ret; union prestera_msg_port_param param; }; struct prestera_msg_port_stats_resp { struct prestera_msg_ret ret; __le64 stats[PRESTERA_PORT_CNT_MAX]; }; struct prestera_msg_port_info_req { struct prestera_msg_cmd cmd; __le32 port; }; struct prestera_msg_port_info_resp { struct prestera_msg_ret ret; __le32 hw_id; __le32 dev_id; __le16 fp_id; u8 pad[2]; }; struct prestera_msg_vlan_req { struct prestera_msg_cmd cmd; __le32 port; __le32 dev; __le16 vid; u8 is_member; u8 is_tagged; }; struct prestera_msg_fdb_req { struct prestera_msg_cmd cmd; __le32 flush_mode; union { struct { __le32 port; __le32 dev; }; __le16 lag_id; } dest; __le16 vid; u8 dest_type; u8 dynamic; u8 mac[ETH_ALEN]; u8 __pad[2]; }; struct prestera_msg_bridge_req { struct prestera_msg_cmd cmd; __le32 port; __le32 dev; __le16 bridge; u8 pad[2]; }; struct prestera_msg_bridge_resp { struct prestera_msg_ret ret; __le16 bridge; u8 pad[2]; }; struct prestera_msg_vtcam_create_req { struct prestera_msg_cmd cmd; __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; u8 direction; u8 lookup; u8 pad[2]; }; struct prestera_msg_vtcam_destroy_req { struct prestera_msg_cmd cmd; __le32 vtcam_id; }; struct prestera_msg_vtcam_rule_add_req { struct prestera_msg_cmd cmd; __le32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; __le32 vtcam_id; __le32 prio; __le32 n_act; }; struct prestera_msg_vtcam_rule_del_req { struct prestera_msg_cmd cmd; __le32 vtcam_id; __le32 id; }; struct prestera_msg_vtcam_bind_req { struct prestera_msg_cmd cmd; union { struct { __le32 hw_id; __le32 dev_id; } port; __le32 index; }; __le32 vtcam_id; __le16 pcl_id; __le16 type; }; struct prestera_msg_vtcam_resp { struct prestera_msg_ret ret; __le32 vtcam_id; __le32 rule_id; }; struct prestera_msg_acl_action { __le32 id; __le32 __reserved; union { struct { __le32 index; } jump; struct { __le32 id; } police; struct { __le32 id; } count; __le32 reserved[6]; }; }; struct prestera_msg_counter_req { struct prestera_msg_cmd cmd; __le32 client; __le32 block_id; __le32 num_counters; }; struct prestera_msg_counter_stats { __le64 packets; __le64 bytes; }; struct prestera_msg_counter_resp { struct prestera_msg_ret ret; __le32 block_id; __le32 offset; __le32 num_counters; __le32 done; struct prestera_msg_counter_stats stats[]; }; struct prestera_msg_span_req { struct prestera_msg_cmd cmd; __le32 port; __le32 dev; u8 id; u8 pad[3]; }; struct prestera_msg_span_resp { struct prestera_msg_ret ret; u8 id; u8 pad[3]; }; struct prestera_msg_stp_req { struct prestera_msg_cmd cmd; __le32 port; __le32 dev; __le16 vid; u8 state; u8 __pad; }; struct prestera_msg_rxtx_req { struct prestera_msg_cmd cmd; u8 use_sdma; u8 pad[3]; }; struct prestera_msg_rxtx_resp { struct prestera_msg_ret ret; __le32 map_addr; }; struct prestera_msg_iface { union { struct { __le32 dev; __le32 port; }; __le16 lag_id; }; __le16 vr_id; __le16 vid; u8 type; u8 __pad[3]; }; struct 
prestera_msg_ip_addr { union { __be32 ipv4; __be32 ipv6[4]; } u; u8 v; /* e.g. PRESTERA_IPV4 */ u8 __pad[3]; }; struct prestera_msg_nh { struct prestera_msg_iface oif; __le32 hw_id; u8 mac[ETH_ALEN]; u8 is_active; u8 pad; }; struct prestera_msg_rif_req { struct prestera_msg_cmd cmd; struct prestera_msg_iface iif; __le32 mtu; __le16 rif_id; __le16 __reserved; u8 mac[ETH_ALEN]; u8 __pad[2]; }; struct prestera_msg_rif_resp { struct prestera_msg_ret ret; __le16 rif_id; u8 __pad[2]; }; struct prestera_msg_lpm_req { struct prestera_msg_cmd cmd; struct prestera_msg_ip_addr dst; __le32 grp_id; __le32 dst_len; __le16 vr_id; u8 __pad[2]; }; struct prestera_msg_nh_req { struct prestera_msg_cmd cmd; struct prestera_msg_nh nh[PRESTERA_NHGR_SIZE_MAX]; __le32 size; __le32 grp_id; }; struct prestera_msg_nh_chunk_req { struct prestera_msg_cmd cmd; __le32 offset; }; struct prestera_msg_nh_chunk_resp { struct prestera_msg_ret ret; u8 hw_state[PRESTERA_MSG_CHUNK_SIZE]; }; struct prestera_msg_nh_grp_req { struct prestera_msg_cmd cmd; __le32 grp_id; __le32 size; }; struct prestera_msg_nh_grp_resp { struct prestera_msg_ret ret; __le32 grp_id; }; struct prestera_msg_vr_req { struct prestera_msg_cmd cmd; __le16 vr_id; u8 __pad[2]; }; struct prestera_msg_vr_resp { struct prestera_msg_ret ret; __le16 vr_id; u8 __pad[2]; }; struct prestera_msg_lag_req { struct prestera_msg_cmd cmd; __le32 port; __le32 dev; __le16 lag_id; u8 pad[2]; }; struct prestera_msg_cpu_code_counter_req { struct prestera_msg_cmd cmd; u8 counter_type; u8 code; u8 pad[2]; }; struct mvsw_msg_cpu_code_counter_ret { struct prestera_msg_ret ret; __le64 packet_count; }; struct prestera_msg_policer_req { struct prestera_msg_cmd cmd; __le32 id; union { struct { __le64 cir; __le32 cbs; } __packed sr_tcm; /* make sure always 12 bytes size */ __le32 reserved[6]; }; u8 mode; u8 type; u8 pad[2]; }; struct prestera_msg_policer_resp { struct prestera_msg_ret ret; __le32 id; }; struct prestera_msg_event { __le16 type; __le16 id; }; struct prestera_msg_event_port { struct prestera_msg_event id; __le32 port_id; struct prestera_msg_event_port_param param; }; union prestera_msg_event_fdb_param { u8 mac[ETH_ALEN]; }; struct prestera_msg_event_fdb { struct prestera_msg_event id; __le32 vid; union { __le32 port_id; __le16 lag_id; } dest; union prestera_msg_event_fdb_param param; u8 dest_type; }; struct prestera_msg_flood_domain_create_req { struct prestera_msg_cmd cmd; }; struct prestera_msg_flood_domain_create_resp { struct prestera_msg_ret ret; __le32 flood_domain_idx; }; struct prestera_msg_flood_domain_destroy_req { struct prestera_msg_cmd cmd; __le32 flood_domain_idx; }; struct prestera_msg_flood_domain_ports_set_req { struct prestera_msg_cmd cmd; __le32 flood_domain_idx; __le32 ports_num; }; struct prestera_msg_flood_domain_ports_reset_req { struct prestera_msg_cmd cmd; __le32 flood_domain_idx; }; struct prestera_msg_flood_domain_port { union { struct { __le32 port_num; __le32 dev_num; }; __le16 lag_id; }; __le16 vid; __le16 port_type; }; struct prestera_msg_mdb_create_req { struct prestera_msg_cmd cmd; __le32 flood_domain_idx; __le16 vid; u8 mac[ETH_ALEN]; }; struct prestera_msg_mdb_destroy_req { struct prestera_msg_cmd cmd; __le32 flood_domain_idx; __le16 vid; u8 mac[ETH_ALEN]; }; static void prestera_hw_build_tests(void) { /* check requests */ BUILD_BUG_ON(sizeof(struct prestera_msg_common_req) != 4); BUILD_BUG_ON(sizeof(struct prestera_msg_switch_attr_req) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_port_attr_req) != 144); BUILD_BUG_ON(sizeof(struct 
prestera_msg_port_info_req) != 8); BUILD_BUG_ON(sizeof(struct prestera_msg_vlan_req) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_fdb_req) != 28); BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_req) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_span_req) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_stp_req) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_req) != 8); BUILD_BUG_ON(sizeof(struct prestera_msg_lag_req) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_cpu_code_counter_req) != 8); BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_create_req) != 84); BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_destroy_req) != 8); BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_rule_add_req) != 168); BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_rule_del_req) != 12); BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_bind_req) != 20); BUILD_BUG_ON(sizeof(struct prestera_msg_acl_action) != 32); BUILD_BUG_ON(sizeof(struct prestera_msg_counter_req) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_counter_stats) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_rif_req) != 36); BUILD_BUG_ON(sizeof(struct prestera_msg_vr_req) != 8); BUILD_BUG_ON(sizeof(struct prestera_msg_lpm_req) != 36); BUILD_BUG_ON(sizeof(struct prestera_msg_policer_req) != 36); BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_create_req) != 4); BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_destroy_req) != 8); BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_ports_set_req) != 12); BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_ports_reset_req) != 8); BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_create_req) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_destroy_req) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_nh_req) != 124); BUILD_BUG_ON(sizeof(struct prestera_msg_nh_chunk_req) != 8); BUILD_BUG_ON(sizeof(struct prestera_msg_nh_grp_req) != 12); /* structure that are part of req/resp fw messages */ BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_ip_addr) != 20); BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_port) != 12); BUILD_BUG_ON(sizeof(struct prestera_msg_nh) != 28); /* check responses */ BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8); BUILD_BUG_ON(sizeof(struct prestera_msg_switch_init_resp) != 24); BUILD_BUG_ON(sizeof(struct prestera_msg_port_attr_resp) != 136); BUILD_BUG_ON(sizeof(struct prestera_msg_port_stats_resp) != 248); BUILD_BUG_ON(sizeof(struct prestera_msg_port_info_resp) != 20); BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_resp) != 12); BUILD_BUG_ON(sizeof(struct prestera_msg_span_resp) != 12); BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_resp) != 12); BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_resp) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_counter_resp) != 24); BUILD_BUG_ON(sizeof(struct prestera_msg_rif_resp) != 12); BUILD_BUG_ON(sizeof(struct prestera_msg_vr_resp) != 12); BUILD_BUG_ON(sizeof(struct prestera_msg_policer_resp) != 12); BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_create_resp) != 12); BUILD_BUG_ON(sizeof(struct prestera_msg_nh_chunk_resp) != 1032); BUILD_BUG_ON(sizeof(struct prestera_msg_nh_grp_resp) != 12); /* check events */ BUILD_BUG_ON(sizeof(struct prestera_msg_event_port) != 20); BUILD_BUG_ON(sizeof(struct prestera_msg_event_fdb) != 20); } static u8 prestera_hw_mdix_to_eth(u8 mode); static void prestera_hw_remote_fc_to_eth(u8 fc, bool *pause, bool *asym_pause); static int __prestera_cmd_ret(struct prestera_switch *sw, enum prestera_cmd_type_t type, struct prestera_msg_cmd *cmd, 
size_t clen, struct prestera_msg_ret *ret, size_t rlen, int waitms) { struct prestera_device *dev = sw->dev; int err; cmd->type = __cpu_to_le32(type); err = dev->send_req(dev, 0, cmd, clen, ret, rlen, waitms); if (err) return err; if (ret->cmd.type != __cpu_to_le32(PRESTERA_CMD_TYPE_ACK)) return -EBADE; if (ret->status != __cpu_to_le32(PRESTERA_CMD_ACK_OK)) return -EINVAL; return 0; } static int prestera_cmd_ret(struct prestera_switch *sw, enum prestera_cmd_type_t type, struct prestera_msg_cmd *cmd, size_t clen, struct prestera_msg_ret *ret, size_t rlen) { return __prestera_cmd_ret(sw, type, cmd, clen, ret, rlen, 0); } static int prestera_cmd_ret_wait(struct prestera_switch *sw, enum prestera_cmd_type_t type, struct prestera_msg_cmd *cmd, size_t clen, struct prestera_msg_ret *ret, size_t rlen, int waitms) { return __prestera_cmd_ret(sw, type, cmd, clen, ret, rlen, waitms); } static int prestera_cmd(struct prestera_switch *sw, enum prestera_cmd_type_t type, struct prestera_msg_cmd *cmd, size_t clen) { struct prestera_msg_common_resp resp; return prestera_cmd_ret(sw, type, cmd, clen, &resp.ret, sizeof(resp)); } static int prestera_fw_parse_port_evt(void *msg, struct prestera_event *evt) { struct prestera_msg_event_port *hw_evt; hw_evt = (struct prestera_msg_event_port *)msg; evt->port_evt.port_id = __le32_to_cpu(hw_evt->port_id); if (evt->id == PRESTERA_PORT_EVENT_MAC_STATE_CHANGED) { evt->port_evt.data.mac.oper = hw_evt->param.mac.oper; evt->port_evt.data.mac.mode = __le32_to_cpu(hw_evt->param.mac.mode); evt->port_evt.data.mac.speed = __le32_to_cpu(hw_evt->param.mac.speed); evt->port_evt.data.mac.duplex = hw_evt->param.mac.duplex; evt->port_evt.data.mac.fc = hw_evt->param.mac.fc; evt->port_evt.data.mac.fec = hw_evt->param.mac.fec; } else { return -EINVAL; } return 0; } static int prestera_fw_parse_fdb_evt(void *msg, struct prestera_event *evt) { struct prestera_msg_event_fdb *hw_evt = msg; switch (hw_evt->dest_type) { case PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT: evt->fdb_evt.type = PRESTERA_FDB_ENTRY_TYPE_REG_PORT; evt->fdb_evt.dest.port_id = __le32_to_cpu(hw_evt->dest.port_id); break; case PRESTERA_HW_FDB_ENTRY_TYPE_LAG: evt->fdb_evt.type = PRESTERA_FDB_ENTRY_TYPE_LAG; evt->fdb_evt.dest.lag_id = __le16_to_cpu(hw_evt->dest.lag_id); break; default: return -EINVAL; } evt->fdb_evt.vid = __le32_to_cpu(hw_evt->vid); ether_addr_copy(evt->fdb_evt.data.mac, hw_evt->param.mac); return 0; } static struct prestera_fw_evt_parser { int (*func)(void *msg, struct prestera_event *evt); } fw_event_parsers[PRESTERA_EVENT_TYPE_MAX] = { [PRESTERA_EVENT_TYPE_PORT] = { .func = prestera_fw_parse_port_evt }, [PRESTERA_EVENT_TYPE_FDB] = { .func = prestera_fw_parse_fdb_evt }, }; static struct prestera_fw_event_handler * __find_event_handler(const struct prestera_switch *sw, enum prestera_event_type type) { struct prestera_fw_event_handler *eh; list_for_each_entry_rcu(eh, &sw->event_handlers, list) { if (eh->type == type) return eh; } return NULL; } static int prestera_find_event_handler(const struct prestera_switch *sw, enum prestera_event_type type, struct prestera_fw_event_handler *eh) { struct prestera_fw_event_handler *tmp; int err = 0; rcu_read_lock(); tmp = __find_event_handler(sw, type); if (tmp) *eh = *tmp; else err = -ENOENT; rcu_read_unlock(); return err; } static int prestera_evt_recv(struct prestera_device *dev, void *buf, size_t size) { struct prestera_switch *sw = dev->priv; struct prestera_msg_event *msg = buf; struct prestera_fw_event_handler eh; struct prestera_event evt; u16 msg_type; int err; 
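	/* Event dispatch: validate the FW event type, make sure a parser
	 * exists for it, take an RCU-safe copy of the registered handler,
	 * decode the raw message into a prestera_event and invoke the
	 * handler with it.
	 */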
msg_type = __le16_to_cpu(msg->type); if (msg_type >= PRESTERA_EVENT_TYPE_MAX) return -EINVAL; if (!fw_event_parsers[msg_type].func) return -ENOENT; err = prestera_find_event_handler(sw, msg_type, &eh); if (err) return err; evt.id = __le16_to_cpu(msg->id); err = fw_event_parsers[msg_type].func(buf, &evt); if (err) return err; eh.func(sw, &evt, eh.arg); return 0; } static void prestera_pkt_recv(struct prestera_device *dev) { struct prestera_switch *sw = dev->priv; struct prestera_fw_event_handler eh; struct prestera_event ev; int err; ev.id = PRESTERA_RXTX_EVENT_RCV_PKT; err = prestera_find_event_handler(sw, PRESTERA_EVENT_TYPE_RXTX, &eh); if (err) return; eh.func(sw, &ev, eh.arg); } static u8 prestera_hw_mdix_to_eth(u8 mode) { switch (mode) { case PRESTERA_PORT_TP_MDI: return ETH_TP_MDI; case PRESTERA_PORT_TP_MDIX: return ETH_TP_MDI_X; case PRESTERA_PORT_TP_AUTO: return ETH_TP_MDI_AUTO; default: return ETH_TP_MDI_INVALID; } } static u8 prestera_hw_mdix_from_eth(u8 mode) { switch (mode) { case ETH_TP_MDI: return PRESTERA_PORT_TP_MDI; case ETH_TP_MDI_X: return PRESTERA_PORT_TP_MDIX; case ETH_TP_MDI_AUTO: return PRESTERA_PORT_TP_AUTO; default: return PRESTERA_PORT_TP_NA; } } int prestera_hw_port_info_get(const struct prestera_port *port, u32 *dev_id, u32 *hw_id, u16 *fp_id) { struct prestera_msg_port_info_req req = { .port = __cpu_to_le32(port->id), }; struct prestera_msg_port_info_resp resp; int err; err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_INFO_GET, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); if (err) return err; *dev_id = __le32_to_cpu(resp.dev_id); *hw_id = __le32_to_cpu(resp.hw_id); *fp_id = __le16_to_cpu(resp.fp_id); return 0; } int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac) { struct prestera_msg_switch_attr_req req = { .attr = __cpu_to_le32(PRESTERA_CMD_SWITCH_ATTR_MAC), }; ether_addr_copy(req.param.mac, mac); return prestera_cmd(sw, PRESTERA_CMD_TYPE_SWITCH_ATTR_SET, &req.cmd, sizeof(req)); } int prestera_hw_switch_init(struct prestera_switch *sw) { struct prestera_msg_switch_init_resp resp; struct prestera_msg_common_req req; int err; INIT_LIST_HEAD(&sw->event_handlers); prestera_hw_build_tests(); err = prestera_cmd_ret_wait(sw, PRESTERA_CMD_TYPE_SWITCH_INIT, &req.cmd, sizeof(req), &resp.ret, sizeof(resp), PRESTERA_SWITCH_INIT_TIMEOUT_MS); if (err) return err; sw->dev->recv_msg = prestera_evt_recv; sw->dev->recv_pkt = prestera_pkt_recv; sw->port_count = __le32_to_cpu(resp.port_count); sw->mtu_min = PRESTERA_MIN_MTU; sw->mtu_max = __le32_to_cpu(resp.mtu_max); sw->id = resp.switch_id; sw->lag_member_max = resp.lag_member_max; sw->lag_max = resp.lag_max; sw->size_tbl_router_nexthop = __le32_to_cpu(resp.size_tbl_router_nexthop); return 0; } void prestera_hw_switch_fini(struct prestera_switch *sw) { WARN_ON(!list_empty(&sw->event_handlers)); } int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms) { struct prestera_msg_switch_attr_req req = { .attr = __cpu_to_le32(PRESTERA_CMD_SWITCH_ATTR_AGEING), .param = { .ageing_timeout_ms = __cpu_to_le32(ageing_ms), }, }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_SWITCH_ATTR_SET, &req.cmd, sizeof(req)); } int prestera_hw_port_mac_mode_get(const struct prestera_port *port, u32 *mode, u32 *speed, u8 *duplex, u8 *fec) { struct prestera_msg_port_attr_resp resp; struct prestera_msg_port_attr_req req = { .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MAC_MODE), .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id) }; int err; err = prestera_cmd_ret(port->sw, 
PRESTERA_CMD_TYPE_PORT_ATTR_GET, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); if (err) return err; if (mode) *mode = __le32_to_cpu(resp.param.link_evt.mac.mode); if (speed) *speed = __le32_to_cpu(resp.param.link_evt.mac.speed); if (duplex) *duplex = resp.param.link_evt.mac.duplex; if (fec) *fec = resp.param.link_evt.mac.fec; return err; } int prestera_hw_port_mac_mode_set(const struct prestera_port *port, bool admin, u32 mode, u8 inband, u32 speed, u8 duplex, u8 fec) { struct prestera_msg_port_attr_req req = { .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MAC_MODE), .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), .param = { .link = { .mac = { .admin = admin, .reg_mode.mode = __cpu_to_le32(mode), .reg_mode.inband = inband, .reg_mode.speed = __cpu_to_le32(speed), .reg_mode.duplex = duplex, .reg_mode.fec = fec } } } }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, &req.cmd, sizeof(req)); } int prestera_hw_port_phy_mode_get(const struct prestera_port *port, u8 *mdix, u64 *lmode_bmap, bool *fc_pause, bool *fc_asym) { struct prestera_msg_port_attr_resp resp; struct prestera_msg_port_attr_req req = { .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_MODE), .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id) }; int err; err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); if (err) return err; if (mdix) *mdix = prestera_hw_mdix_to_eth(resp.param.link_evt.phy.mdix); if (lmode_bmap) *lmode_bmap = __le64_to_cpu(resp.param.link_evt.phy.lmode_bmap); if (fc_pause && fc_asym) prestera_hw_remote_fc_to_eth(resp.param.link_evt.phy.fc, fc_pause, fc_asym); return err; } int prestera_hw_port_phy_mode_set(const struct prestera_port *port, bool admin, bool adv, u32 mode, u64 modes, u8 mdix) { struct prestera_msg_port_attr_req req = { .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_MODE), .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), .param = { .link = { .phy = { .admin = admin, .adv_enable = adv ? 
1 : 0, .mode = __cpu_to_le32(mode), .modes = __cpu_to_le64(modes), } } } }; req.param.link.phy.mdix = prestera_hw_mdix_from_eth(mdix); return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, &req.cmd, sizeof(req)); } int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu) { struct prestera_msg_port_attr_req req = { .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MTU), .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), .param = { .mtu = __cpu_to_le32(mtu), } }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, &req.cmd, sizeof(req)); } int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac) { struct prestera_msg_port_attr_req req = { .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MAC), .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), }; ether_addr_copy(req.param.mac, mac); return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, &req.cmd, sizeof(req)); } int prestera_hw_port_accept_frm_type(struct prestera_port *port, enum prestera_accept_frm_type type) { struct prestera_msg_port_attr_req req = { .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE), .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), .param = { .accept_frm_type = type, } }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, &req.cmd, sizeof(req)); } int prestera_hw_port_cap_get(const struct prestera_port *port, struct prestera_port_caps *caps) { struct prestera_msg_port_attr_req req = { .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_CAPABILITY), .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), }; struct prestera_msg_port_attr_resp resp; int err; err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); if (err) return err; caps->supp_link_modes = __le64_to_cpu(resp.param.cap.link_mode); caps->transceiver = resp.param.cap.transceiver; caps->supp_fec = resp.param.cap.fec; caps->type = resp.param.cap.type; return err; } static void prestera_hw_remote_fc_to_eth(u8 fc, bool *pause, bool *asym_pause) { switch (fc) { case PRESTERA_FC_SYMMETRIC: *pause = true; *asym_pause = false; break; case PRESTERA_FC_ASYMMETRIC: *pause = false; *asym_pause = true; break; case PRESTERA_FC_SYMM_ASYMM: *pause = true; *asym_pause = true; break; default: *pause = false; *asym_pause = false; } } int prestera_hw_vtcam_create(struct prestera_switch *sw, u8 lookup, const u32 *keymask, u32 *vtcam_id, enum prestera_hw_vtcam_direction_t dir) { int err; struct prestera_msg_vtcam_resp resp; struct prestera_msg_vtcam_create_req req = { .lookup = lookup, .direction = dir, }; if (keymask) memcpy(req.keymask, keymask, sizeof(req.keymask)); else memset(req.keymask, 0, sizeof(req.keymask)); err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_VTCAM_CREATE, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); if (err) return err; *vtcam_id = __le32_to_cpu(resp.vtcam_id); return 0; } int prestera_hw_vtcam_destroy(struct prestera_switch *sw, u32 vtcam_id) { struct prestera_msg_vtcam_destroy_req req = { .vtcam_id = __cpu_to_le32(vtcam_id), }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_DESTROY, &req.cmd, sizeof(req)); } static int prestera_acl_rule_add_put_action(struct prestera_msg_acl_action *action, struct prestera_acl_hw_action_info *info) { action->id = __cpu_to_le32(info->id); switch (info->id) { case PRESTERA_ACL_RULE_ACTION_ACCEPT: case PRESTERA_ACL_RULE_ACTION_DROP: case PRESTERA_ACL_RULE_ACTION_TRAP: /* just rule action id, no specific data */ 
break; case PRESTERA_ACL_RULE_ACTION_JUMP: action->jump.index = __cpu_to_le32(info->jump.index); break; case PRESTERA_ACL_RULE_ACTION_POLICE: action->police.id = __cpu_to_le32(info->police.id); break; case PRESTERA_ACL_RULE_ACTION_COUNT: action->count.id = __cpu_to_le32(info->count.id); break; default: return -EINVAL; } return 0; } int prestera_hw_vtcam_rule_add(struct prestera_switch *sw, u32 vtcam_id, u32 prio, void *key, void *keymask, struct prestera_acl_hw_action_info *act, u8 n_act, u32 *rule_id) { struct prestera_msg_acl_action *actions_msg; struct prestera_msg_vtcam_rule_add_req *req; struct prestera_msg_vtcam_resp resp; void *buff; u32 size; int err; u8 i; size = sizeof(*req) + sizeof(*actions_msg) * n_act; buff = kzalloc(size, GFP_KERNEL); if (!buff) return -ENOMEM; req = buff; req->n_act = __cpu_to_le32(n_act); actions_msg = buff + sizeof(*req); /* put acl matches into the message */ memcpy(req->key, key, sizeof(req->key)); memcpy(req->keymask, keymask, sizeof(req->keymask)); /* put acl actions into the message */ for (i = 0; i < n_act; i++) { err = prestera_acl_rule_add_put_action(&actions_msg[i], &act[i]); if (err) goto free_buff; } req->vtcam_id = __cpu_to_le32(vtcam_id); req->prio = __cpu_to_le32(prio); err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_VTCAM_RULE_ADD, &req->cmd, size, &resp.ret, sizeof(resp)); if (err) goto free_buff; *rule_id = __le32_to_cpu(resp.rule_id); free_buff: kfree(buff); return err; } int prestera_hw_vtcam_rule_del(struct prestera_switch *sw, u32 vtcam_id, u32 rule_id) { struct prestera_msg_vtcam_rule_del_req req = { .vtcam_id = __cpu_to_le32(vtcam_id), .id = __cpu_to_le32(rule_id) }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_RULE_DELETE, &req.cmd, sizeof(req)); } int prestera_hw_vtcam_iface_bind(struct prestera_switch *sw, struct prestera_acl_iface *iface, u32 vtcam_id, u16 pcl_id) { struct prestera_msg_vtcam_bind_req req = { .vtcam_id = __cpu_to_le32(vtcam_id), .type = __cpu_to_le16(iface->type), .pcl_id = __cpu_to_le16(pcl_id) }; if (iface->type == PRESTERA_ACL_IFACE_TYPE_PORT) { req.port.dev_id = __cpu_to_le32(iface->port->dev_id); req.port.hw_id = __cpu_to_le32(iface->port->hw_id); } else { req.index = __cpu_to_le32(iface->index); } return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_IFACE_BIND, &req.cmd, sizeof(req)); } int prestera_hw_vtcam_iface_unbind(struct prestera_switch *sw, struct prestera_acl_iface *iface, u32 vtcam_id) { struct prestera_msg_vtcam_bind_req req = { .vtcam_id = __cpu_to_le32(vtcam_id), .type = __cpu_to_le16(iface->type) }; if (iface->type == PRESTERA_ACL_IFACE_TYPE_PORT) { req.port.dev_id = __cpu_to_le32(iface->port->dev_id); req.port.hw_id = __cpu_to_le32(iface->port->hw_id); } else { req.index = __cpu_to_le32(iface->index); } return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_IFACE_UNBIND, &req.cmd, sizeof(req)); } int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id) { struct prestera_msg_span_resp resp; struct prestera_msg_span_req req = { .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), }; int err; err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_SPAN_GET, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); if (err) return err; *span_id = resp.id; return 0; } int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id, bool ingress) { struct prestera_msg_span_req req = { .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), .id = span_id, }; enum prestera_cmd_type_t cmd_type; if (ingress) cmd_type = PRESTERA_CMD_TYPE_SPAN_INGRESS_BIND; else cmd_type = 
PRESTERA_CMD_TYPE_SPAN_EGRESS_BIND; return prestera_cmd(port->sw, cmd_type, &req.cmd, sizeof(req)); } int prestera_hw_span_unbind(const struct prestera_port *port, bool ingress) { struct prestera_msg_span_req req = { .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), }; enum prestera_cmd_type_t cmd_type; if (ingress) cmd_type = PRESTERA_CMD_TYPE_SPAN_INGRESS_UNBIND; else cmd_type = PRESTERA_CMD_TYPE_SPAN_EGRESS_UNBIND; return prestera_cmd(port->sw, cmd_type, &req.cmd, sizeof(req)); } int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id) { struct prestera_msg_span_req req = { .id = span_id }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_SPAN_RELEASE, &req.cmd, sizeof(req)); } int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type) { struct prestera_msg_port_attr_req req = { .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_TYPE), .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), }; struct prestera_msg_port_attr_resp resp; int err; err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); if (err) return err; *type = resp.param.type; return 0; } int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed) { struct prestera_msg_port_attr_req req = { .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_SPEED), .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), }; struct prestera_msg_port_attr_resp resp; int err; err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); if (err) return err; *speed = __le32_to_cpu(resp.param.speed); return 0; } int prestera_hw_port_autoneg_restart(struct prestera_port *port) { struct prestera_msg_port_attr_req req = { .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_AUTONEG_RESTART), .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, &req.cmd, sizeof(req)); } int prestera_hw_port_stats_get(const struct prestera_port *port, struct prestera_port_stats *st) { struct prestera_msg_port_attr_req req = { .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_STATS), .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), }; struct prestera_msg_port_stats_resp resp; __le64 *hw = resp.stats; int err; err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); if (err) return err; st->good_octets_received = __le64_to_cpu(hw[PRESTERA_PORT_GOOD_OCTETS_RCV_CNT]); st->bad_octets_received = __le64_to_cpu(hw[PRESTERA_PORT_BAD_OCTETS_RCV_CNT]); st->mac_trans_error = __le64_to_cpu(hw[PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT]); st->broadcast_frames_received = __le64_to_cpu(hw[PRESTERA_PORT_BRDC_PKTS_RCV_CNT]); st->multicast_frames_received = __le64_to_cpu(hw[PRESTERA_PORT_MC_PKTS_RCV_CNT]); st->frames_64_octets = __le64_to_cpu(hw[PRESTERA_PORT_PKTS_64L_CNT]); st->frames_65_to_127_octets = __le64_to_cpu(hw[PRESTERA_PORT_PKTS_65TO127L_CNT]); st->frames_128_to_255_octets = __le64_to_cpu(hw[PRESTERA_PORT_PKTS_128TO255L_CNT]); st->frames_256_to_511_octets = __le64_to_cpu(hw[PRESTERA_PORT_PKTS_256TO511L_CNT]); st->frames_512_to_1023_octets = __le64_to_cpu(hw[PRESTERA_PORT_PKTS_512TO1023L_CNT]); st->frames_1024_to_max_octets = __le64_to_cpu(hw[PRESTERA_PORT_PKTS_1024TOMAXL_CNT]); st->excessive_collision = __le64_to_cpu(hw[PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT]); st->multicast_frames_sent = __le64_to_cpu(hw[PRESTERA_PORT_MC_PKTS_SENT_CNT]); 
st->broadcast_frames_sent = __le64_to_cpu(hw[PRESTERA_PORT_BRDC_PKTS_SENT_CNT]); st->fc_sent = __le64_to_cpu(hw[PRESTERA_PORT_FC_SENT_CNT]); st->fc_received = __le64_to_cpu(hw[PRESTERA_PORT_GOOD_FC_RCV_CNT]); st->buffer_overrun = __le64_to_cpu(hw[PRESTERA_PORT_DROP_EVENTS_CNT]); st->undersize = __le64_to_cpu(hw[PRESTERA_PORT_UNDERSIZE_PKTS_CNT]); st->fragments = __le64_to_cpu(hw[PRESTERA_PORT_FRAGMENTS_PKTS_CNT]); st->oversize = __le64_to_cpu(hw[PRESTERA_PORT_OVERSIZE_PKTS_CNT]); st->jabber = __le64_to_cpu(hw[PRESTERA_PORT_JABBER_PKTS_CNT]); st->rx_error_frame_received = __le64_to_cpu(hw[PRESTERA_PORT_MAC_RCV_ERROR_CNT]); st->bad_crc = __le64_to_cpu(hw[PRESTERA_PORT_BAD_CRC_CNT]); st->collisions = __le64_to_cpu(hw[PRESTERA_PORT_COLLISIONS_CNT]); st->late_collision = __le64_to_cpu(hw[PRESTERA_PORT_LATE_COLLISIONS_CNT]); st->unicast_frames_received = __le64_to_cpu(hw[PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT]); st->unicast_frames_sent = __le64_to_cpu(hw[PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT]); st->sent_multiple = __le64_to_cpu(hw[PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT]); st->sent_deferred = __le64_to_cpu(hw[PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT]); st->good_octets_sent = __le64_to_cpu(hw[PRESTERA_PORT_GOOD_OCTETS_SENT_CNT]); return 0; } int prestera_hw_port_learning_set(struct prestera_port *port, bool enable) { struct prestera_msg_port_attr_req req = { .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_LEARNING), .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), .param = { .learning = enable, } }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, &req.cmd, sizeof(req)); } int prestera_hw_port_uc_flood_set(const struct prestera_port *port, bool flood) { struct prestera_msg_port_attr_req req = { .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD), .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), .param = { .flood_ext = { .type = PRESTERA_PORT_FLOOD_TYPE_UC, .enable = flood, } } }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, &req.cmd, sizeof(req)); } int prestera_hw_port_mc_flood_set(const struct prestera_port *port, bool flood) { struct prestera_msg_port_attr_req req = { .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD), .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), .param = { .flood_ext = { .type = PRESTERA_PORT_FLOOD_TYPE_MC, .enable = flood, } } }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, &req.cmd, sizeof(req)); } int prestera_hw_port_br_locked_set(const struct prestera_port *port, bool br_locked) { struct prestera_msg_port_attr_req req = { .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_LOCKED), .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), .param = { .br_locked = br_locked, } }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET, &req.cmd, sizeof(req)); } int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid) { struct prestera_msg_vlan_req req = { .vid = __cpu_to_le16(vid), }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_CREATE, &req.cmd, sizeof(req)); } int prestera_hw_vlan_delete(struct prestera_switch *sw, u16 vid) { struct prestera_msg_vlan_req req = { .vid = __cpu_to_le16(vid), }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_DELETE, &req.cmd, sizeof(req)); } int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid, bool is_member, bool untagged) { struct prestera_msg_vlan_req req = { .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), .vid = __cpu_to_le16(vid), .is_member = is_member, .is_tagged = 
!untagged, }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_VLAN_PORT_SET, &req.cmd, sizeof(req)); } int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid) { struct prestera_msg_vlan_req req = { .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), .vid = __cpu_to_le16(vid), }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_VLAN_PVID_SET, &req.cmd, sizeof(req)); } int prestera_hw_vlan_port_stp_set(struct prestera_port *port, u16 vid, u8 state) { struct prestera_msg_stp_req req = { .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), .vid = __cpu_to_le16(vid), .state = state, }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_STP_PORT_SET, &req.cmd, sizeof(req)); } int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac, u16 vid, bool dynamic) { struct prestera_msg_fdb_req req = { .dest = { .dev = __cpu_to_le32(port->dev_id), .port = __cpu_to_le32(port->hw_id), }, .vid = __cpu_to_le16(vid), .dynamic = dynamic, }; ether_addr_copy(req.mac, mac); return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_ADD, &req.cmd, sizeof(req)); } int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac, u16 vid) { struct prestera_msg_fdb_req req = { .dest = { .dev = __cpu_to_le32(port->dev_id), .port = __cpu_to_le32(port->hw_id), }, .vid = __cpu_to_le16(vid), }; ether_addr_copy(req.mac, mac); return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_DELETE, &req.cmd, sizeof(req)); } int prestera_hw_lag_fdb_add(struct prestera_switch *sw, u16 lag_id, const unsigned char *mac, u16 vid, bool dynamic) { struct prestera_msg_fdb_req req = { .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG, .dest = { .lag_id = __cpu_to_le16(lag_id), }, .vid = __cpu_to_le16(vid), .dynamic = dynamic, }; ether_addr_copy(req.mac, mac); return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_ADD, &req.cmd, sizeof(req)); } int prestera_hw_lag_fdb_del(struct prestera_switch *sw, u16 lag_id, const unsigned char *mac, u16 vid) { struct prestera_msg_fdb_req req = { .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG, .dest = { .lag_id = __cpu_to_le16(lag_id), }, .vid = __cpu_to_le16(vid), }; ether_addr_copy(req.mac, mac); return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_DELETE, &req.cmd, sizeof(req)); } int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode) { struct prestera_msg_fdb_req req = { .dest = { .dev = __cpu_to_le32(port->dev_id), .port = __cpu_to_le32(port->hw_id), }, .flush_mode = __cpu_to_le32(mode), }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT, &req.cmd, sizeof(req)); } int prestera_hw_fdb_flush_vlan(struct prestera_switch *sw, u16 vid, u32 mode) { struct prestera_msg_fdb_req req = { .vid = __cpu_to_le16(vid), .flush_mode = __cpu_to_le32(mode), }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_VLAN, &req.cmd, sizeof(req)); } int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid, u32 mode) { struct prestera_msg_fdb_req req = { .dest = { .dev = __cpu_to_le32(port->dev_id), .port = __cpu_to_le32(port->hw_id), }, .vid = __cpu_to_le16(vid), .flush_mode = __cpu_to_le32(mode), }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN, &req.cmd, sizeof(req)); } int prestera_hw_fdb_flush_lag(struct prestera_switch *sw, u16 lag_id, u32 mode) { struct prestera_msg_fdb_req req = { .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG, .dest = { .lag_id = __cpu_to_le16(lag_id), }, .flush_mode = __cpu_to_le32(mode), }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT, &req.cmd, sizeof(req)); } int 
prestera_hw_fdb_flush_lag_vlan(struct prestera_switch *sw, u16 lag_id, u16 vid, u32 mode) { struct prestera_msg_fdb_req req = { .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG, .dest = { .lag_id = __cpu_to_le16(lag_id), }, .vid = __cpu_to_le16(vid), .flush_mode = __cpu_to_le32(mode), }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN, &req.cmd, sizeof(req)); } int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id) { struct prestera_msg_bridge_resp resp; struct prestera_msg_bridge_req req; int err; err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_BRIDGE_CREATE, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); if (err) return err; *bridge_id = __le16_to_cpu(resp.bridge); return 0; } int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id) { struct prestera_msg_bridge_req req = { .bridge = __cpu_to_le16(bridge_id), }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_BRIDGE_DELETE, &req.cmd, sizeof(req)); } int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id) { struct prestera_msg_bridge_req req = { .bridge = __cpu_to_le16(bridge_id), .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD, &req.cmd, sizeof(req)); } int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id) { struct prestera_msg_bridge_req req = { .bridge = __cpu_to_le16(bridge_id), .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE, &req.cmd, sizeof(req)); } static int prestera_iface_to_msg(struct prestera_iface *iface, struct prestera_msg_iface *msg_if) { switch (iface->type) { case PRESTERA_IF_PORT_E: case PRESTERA_IF_VID_E: msg_if->port = __cpu_to_le32(iface->dev_port.port_num); msg_if->dev = __cpu_to_le32(iface->dev_port.hw_dev_num); break; case PRESTERA_IF_LAG_E: msg_if->lag_id = __cpu_to_le16(iface->lag_id); break; default: return -EOPNOTSUPP; } msg_if->vr_id = __cpu_to_le16(iface->vr_id); msg_if->vid = __cpu_to_le16(iface->vlan_id); msg_if->type = iface->type; return 0; } int prestera_hw_rif_create(struct prestera_switch *sw, struct prestera_iface *iif, u8 *mac, u16 *rif_id) { struct prestera_msg_rif_resp resp; struct prestera_msg_rif_req req; int err; memcpy(req.mac, mac, ETH_ALEN); err = prestera_iface_to_msg(iif, &req.iif); if (err) return err; err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); if (err) return err; *rif_id = __le16_to_cpu(resp.rif_id); return err; } int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id, struct prestera_iface *iif) { struct prestera_msg_rif_req req = { .rif_id = __cpu_to_le16(rif_id), }; int err; err = prestera_iface_to_msg(iif, &req.iif); if (err) return err; return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE, &req.cmd, sizeof(req)); } int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id) { struct prestera_msg_vr_resp resp; struct prestera_msg_vr_req req; int err; err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_VR_CREATE, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); if (err) return err; *vr_id = __le16_to_cpu(resp.vr_id); return err; } int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id) { struct prestera_msg_vr_req req = { .vr_id = __cpu_to_le16(vr_id), }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_VR_DELETE, &req.cmd, sizeof(req)); } int prestera_hw_lpm_add(struct prestera_switch *sw, u16 vr_id, __be32 dst, u32 
dst_len, u32 grp_id) { struct prestera_msg_lpm_req req = { .dst_len = __cpu_to_le32(dst_len), .vr_id = __cpu_to_le16(vr_id), .grp_id = __cpu_to_le32(grp_id), .dst.u.ipv4 = dst }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_LPM_ADD, &req.cmd, sizeof(req)); } int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id, __be32 dst, u32 dst_len) { struct prestera_msg_lpm_req req = { .dst_len = __cpu_to_le32(dst_len), .vr_id = __cpu_to_le16(vr_id), .dst.u.ipv4 = dst }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE, &req.cmd, sizeof(req)); } int prestera_hw_nh_entries_set(struct prestera_switch *sw, int count, struct prestera_neigh_info *nhs, u32 grp_id) { struct prestera_msg_nh_req req = { .size = __cpu_to_le32((u32)count), .grp_id = __cpu_to_le32(grp_id) }; int i, err; for (i = 0; i < count; i++) { req.nh[i].is_active = nhs[i].connected; memcpy(&req.nh[i].mac, nhs[i].ha, ETH_ALEN); err = prestera_iface_to_msg(&nhs[i].iface, &req.nh[i].oif); if (err) return err; } return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_SET, &req.cmd, sizeof(req)); } int prestera_hw_nhgrp_blk_get(struct prestera_switch *sw, u8 *hw_state, u32 buf_size /* Buffer in bytes */) { static struct prestera_msg_nh_chunk_resp resp; struct prestera_msg_nh_chunk_req req; u32 buf_offset; int err; memset(&hw_state[0], 0, buf_size); buf_offset = 0; while (1) { if (buf_offset >= buf_size) break; memset(&req, 0, sizeof(req)); req.offset = __cpu_to_le32(buf_offset * 8); /* 8 bits in u8 */ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_BLK_GET, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); if (err) return err; memcpy(&hw_state[buf_offset], &resp.hw_state[0], buf_offset + PRESTERA_MSG_CHUNK_SIZE > buf_size ? buf_size - buf_offset : PRESTERA_MSG_CHUNK_SIZE); buf_offset += PRESTERA_MSG_CHUNK_SIZE; } return 0; } int prestera_hw_nh_group_create(struct prestera_switch *sw, u16 nh_count, u32 *grp_id) { struct prestera_msg_nh_grp_req req = { .size = __cpu_to_le32((u32)nh_count) }; struct prestera_msg_nh_grp_resp resp; int err; err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_ADD, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); if (err) return err; *grp_id = __le32_to_cpu(resp.grp_id); return err; } int prestera_hw_nh_group_delete(struct prestera_switch *sw, u16 nh_count, u32 grp_id) { struct prestera_msg_nh_grp_req req = { .grp_id = __cpu_to_le32(grp_id), .size = __cpu_to_le32(nh_count) }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_DELETE, &req.cmd, sizeof(req)); } int prestera_hw_rxtx_init(struct prestera_switch *sw, struct prestera_rxtx_params *params) { struct prestera_msg_rxtx_resp resp; struct prestera_msg_rxtx_req req; int err; req.use_sdma = params->use_sdma; err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_RXTX_INIT, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); if (err) return err; params->map_addr = __le32_to_cpu(resp.map_addr); return 0; } int prestera_hw_lag_member_add(struct prestera_port *port, u16 lag_id) { struct prestera_msg_lag_req req = { .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), .lag_id = __cpu_to_le16(lag_id), }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_LAG_MEMBER_ADD, &req.cmd, sizeof(req)); } int prestera_hw_lag_member_del(struct prestera_port *port, u16 lag_id) { struct prestera_msg_lag_req req = { .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), .lag_id = __cpu_to_le16(lag_id), }; return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_LAG_MEMBER_DELETE, &req.cmd, sizeof(req)); } int 
prestera_hw_lag_member_enable(struct prestera_port *port, u16 lag_id, bool enable) { struct prestera_msg_lag_req req = { .port = __cpu_to_le32(port->hw_id), .dev = __cpu_to_le32(port->dev_id), .lag_id = __cpu_to_le16(lag_id), }; u32 cmd; cmd = enable ? PRESTERA_CMD_TYPE_LAG_MEMBER_ENABLE : PRESTERA_CMD_TYPE_LAG_MEMBER_DISABLE; return prestera_cmd(port->sw, cmd, &req.cmd, sizeof(req)); } int prestera_hw_cpu_code_counters_get(struct prestera_switch *sw, u8 code, enum prestera_hw_cpu_code_cnt_t counter_type, u64 *packet_count) { struct prestera_msg_cpu_code_counter_req req = { .counter_type = counter_type, .code = code, }; struct mvsw_msg_cpu_code_counter_ret resp; int err; err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_CPU_CODE_COUNTERS_GET, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); if (err) return err; *packet_count = __le64_to_cpu(resp.packet_count); return 0; } int prestera_hw_event_handler_register(struct prestera_switch *sw, enum prestera_event_type type, prestera_event_cb_t fn, void *arg) { struct prestera_fw_event_handler *eh; eh = __find_event_handler(sw, type); if (eh) return -EEXIST; eh = kmalloc(sizeof(*eh), GFP_KERNEL); if (!eh) return -ENOMEM; eh->type = type; eh->func = fn; eh->arg = arg; INIT_LIST_HEAD(&eh->list); list_add_rcu(&eh->list, &sw->event_handlers); return 0; } void prestera_hw_event_handler_unregister(struct prestera_switch *sw, enum prestera_event_type type, prestera_event_cb_t fn) { struct prestera_fw_event_handler *eh; eh = __find_event_handler(sw, type); if (!eh) return; list_del_rcu(&eh->list); kfree_rcu(eh, rcu); } int prestera_hw_counter_trigger(struct prestera_switch *sw, u32 block_id) { struct prestera_msg_counter_req req = { .block_id = __cpu_to_le32(block_id) }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_TRIGGER, &req.cmd, sizeof(req)); } int prestera_hw_counter_abort(struct prestera_switch *sw) { struct prestera_msg_counter_req req; return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_ABORT, &req.cmd, sizeof(req)); } int prestera_hw_counters_get(struct prestera_switch *sw, u32 idx, u32 *len, bool *done, struct prestera_counter_stats *stats) { struct prestera_msg_counter_resp *resp; struct prestera_msg_counter_req req = { .block_id = __cpu_to_le32(idx), .num_counters = __cpu_to_le32(*len), }; size_t size = struct_size(resp, stats, *len); int err, i; resp = kmalloc(size, GFP_KERNEL); if (!resp) return -ENOMEM; err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_COUNTER_GET, &req.cmd, sizeof(req), &resp->ret, size); if (err) goto free_buff; for (i = 0; i < __le32_to_cpu(resp->num_counters); i++) { stats[i].packets += __le64_to_cpu(resp->stats[i].packets); stats[i].bytes += __le64_to_cpu(resp->stats[i].bytes); } *len = __le32_to_cpu(resp->num_counters); *done = __le32_to_cpu(resp->done); free_buff: kfree(resp); return err; } int prestera_hw_counter_block_get(struct prestera_switch *sw, u32 client, u32 *block_id, u32 *offset, u32 *num_counters) { struct prestera_msg_counter_resp resp; struct prestera_msg_counter_req req = { .client = __cpu_to_le32(client) }; int err; err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_COUNTER_BLOCK_GET, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); if (err) return err; *block_id = __le32_to_cpu(resp.block_id); *offset = __le32_to_cpu(resp.offset); *num_counters = __le32_to_cpu(resp.num_counters); return 0; } int prestera_hw_counter_block_release(struct prestera_switch *sw, u32 block_id) { struct prestera_msg_counter_req req = { .block_id = __cpu_to_le32(block_id) }; return prestera_cmd(sw, 
PRESTERA_CMD_TYPE_COUNTER_BLOCK_RELEASE, &req.cmd, sizeof(req)); } int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id, u32 counter_id) { struct prestera_msg_counter_req req = { .block_id = __cpu_to_le32(block_id), .num_counters = __cpu_to_le32(counter_id) }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_CLEAR, &req.cmd, sizeof(req)); } int prestera_hw_policer_create(struct prestera_switch *sw, u8 type, u32 *policer_id) { struct prestera_msg_policer_resp resp; struct prestera_msg_policer_req req = { .type = type }; int err; err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_POLICER_CREATE, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); if (err) return err; *policer_id = __le32_to_cpu(resp.id); return 0; } int prestera_hw_policer_release(struct prestera_switch *sw, u32 policer_id) { struct prestera_msg_policer_req req = { .id = __cpu_to_le32(policer_id) }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_POLICER_RELEASE, &req.cmd, sizeof(req)); } int prestera_hw_policer_sr_tcm_set(struct prestera_switch *sw, u32 policer_id, u64 cir, u32 cbs) { struct prestera_msg_policer_req req = { .mode = PRESTERA_POLICER_MODE_SR_TCM, .id = __cpu_to_le32(policer_id), .sr_tcm = { .cir = __cpu_to_le64(cir), .cbs = __cpu_to_le32(cbs) } }; return prestera_cmd(sw, PRESTERA_CMD_TYPE_POLICER_SET, &req.cmd, sizeof(req)); } int prestera_hw_flood_domain_create(struct prestera_flood_domain *domain) { struct prestera_msg_flood_domain_create_resp resp; struct prestera_msg_flood_domain_create_req req; int err; err = prestera_cmd_ret(domain->sw, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_CREATE, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); if (err) return err; domain->idx = __le32_to_cpu(resp.flood_domain_idx); return 0; } int prestera_hw_flood_domain_destroy(struct prestera_flood_domain *domain) { struct prestera_msg_flood_domain_destroy_req req = { .flood_domain_idx = __cpu_to_le32(domain->idx), }; return prestera_cmd(domain->sw, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_DESTROY, &req.cmd, sizeof(req)); } int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain) { struct prestera_flood_domain_port *flood_domain_port; struct prestera_msg_flood_domain_ports_set_req *req; struct prestera_msg_flood_domain_port *ports; struct prestera_switch *sw = domain->sw; struct prestera_port *port; u32 ports_num = 0; int buf_size; void *buff; u16 lag_id; int err; list_for_each_entry(flood_domain_port, &domain->flood_domain_port_list, flood_domain_port_node) ports_num++; if (!ports_num) return -EINVAL; buf_size = sizeof(*req) + sizeof(*ports) * ports_num; buff = kmalloc(buf_size, GFP_KERNEL); if (!buff) return -ENOMEM; req = buff; ports = buff + sizeof(*req); req->flood_domain_idx = __cpu_to_le32(domain->idx); req->ports_num = __cpu_to_le32(ports_num); list_for_each_entry(flood_domain_port, &domain->flood_domain_port_list, flood_domain_port_node) { if (netif_is_lag_master(flood_domain_port->dev)) { if (prestera_lag_id(sw, flood_domain_port->dev, &lag_id)) { kfree(buff); return -EINVAL; } ports->port_type = __cpu_to_le16(PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_LAG); ports->lag_id = __cpu_to_le16(lag_id); } else { port = prestera_port_dev_lower_find(flood_domain_port->dev); ports->port_type = __cpu_to_le16(PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT); ports->dev_num = __cpu_to_le32(port->dev_id); ports->port_num = __cpu_to_le32(port->hw_id); } ports->vid = __cpu_to_le16(flood_domain_port->vid); ports++; } err = prestera_cmd(sw, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_SET, &req->cmd, buf_size); kfree(buff); return err; } int 
prestera_hw_flood_domain_ports_reset(struct prestera_flood_domain *domain)
{
	struct prestera_msg_flood_domain_ports_reset_req req = {
		.flood_domain_idx = __cpu_to_le32(domain->idx),
	};

	return prestera_cmd(domain->sw,
			    PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_RESET,
			    &req.cmd, sizeof(req));
}

int prestera_hw_mdb_create(struct prestera_mdb_entry *mdb)
{
	struct prestera_msg_mdb_create_req req = {
		.flood_domain_idx = __cpu_to_le32(mdb->flood_domain->idx),
		.vid = __cpu_to_le16(mdb->vid),
	};

	memcpy(req.mac, mdb->addr, ETH_ALEN);

	return prestera_cmd(mdb->sw, PRESTERA_CMD_TYPE_MDB_CREATE,
			    &req.cmd, sizeof(req));
}

int prestera_hw_mdb_destroy(struct prestera_mdb_entry *mdb)
{
	struct prestera_msg_mdb_destroy_req req = {
		.flood_domain_idx = __cpu_to_le32(mdb->flood_domain->idx),
		.vid = __cpu_to_le16(mdb->vid),
	};

	memcpy(req.mac, mdb->addr, ETH_ALEN);

	return prestera_cmd(mdb->sw, PRESTERA_CMD_TYPE_MDB_DESTROY,
			    &req.cmd, sizeof(req));
}
linux-master
drivers/net/ethernet/marvell/prestera/prestera_hw.c
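Nearly every prestera_hw_*() wrapper in the file above follows one firmware-messaging pattern: fill a fixed-layout request struct with __cpu_to_le*() fields, then send it with prestera_cmd() when only a status code comes back, or with prestera_cmd_ret() when the firmware returns a response struct whose fields are converted back with __le*_to_cpu(). The sketch below restates that pattern in one place; it is illustrative only and not part of the driver — the function name and the generic 'type'/'enable' parameters are invented for the illustration, while the structs, constants and helpers it uses all appear in the file above.

/* Illustrative sketch (not driver code): the common request pattern used by
 * the prestera_hw_*() wrappers. All multi-byte fields are converted to
 * little-endian before the message is handed to the firmware.
 */
static int prestera_hw_port_flood_set_sketch(const struct prestera_port *port,
					     u8 type, bool enable)
{
	struct prestera_msg_port_attr_req req = {
		.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
		.port = __cpu_to_le32(port->hw_id),
		.dev = __cpu_to_le32(port->dev_id),
		.param = {
			.flood_ext = {
				.type = type,	/* e.g. PRESTERA_PORT_FLOOD_TYPE_UC */
				.enable = enable,
			}
		}
	};

	/* Fire-and-forget command: only the firmware status code comes back.
	 * Getters use prestera_cmd_ret() with a response struct instead.
	 */
	return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
			    &req.cmd, sizeof(req));
}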
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /* Copyright (c) 2020-2021 Marvell International Ltd. All rights reserved */ #include <linux/rhashtable.h> #include "prestera_acl.h" #include "prestera_flow.h" #include "prestera_hw.h" #include "prestera.h" #define ACL_KEYMASK_SIZE \ (sizeof(__be32) * __PRESTERA_ACL_RULE_MATCH_TYPE_MAX) struct prestera_acl { struct prestera_switch *sw; struct list_head vtcam_list; struct list_head rules; struct rhashtable ruleset_ht; struct rhashtable acl_rule_entry_ht; struct idr uid; }; struct prestera_acl_ruleset_ht_key { struct prestera_flow_block *block; u32 chain_index; }; struct prestera_acl_rule_entry { struct rhash_head ht_node; struct prestera_acl_rule_entry_key key; u32 hw_id; u32 vtcam_id; struct { struct { u8 valid:1; } accept, drop, trap; struct { u8 valid:1; struct prestera_acl_action_police i; } police; struct { struct prestera_acl_action_jump i; u8 valid:1; } jump; struct { u32 id; struct prestera_counter_block *block; } counter; }; }; struct prestera_acl_ruleset { struct rhash_head ht_node; /* Member of acl HT */ struct prestera_acl_ruleset_ht_key ht_key; struct rhashtable rule_ht; struct prestera_acl *acl; struct { u32 min; u32 max; } prio; unsigned long rule_count; refcount_t refcount; void *keymask; u32 vtcam_id; u32 index; u16 pcl_id; bool offload; bool ingress; }; struct prestera_acl_vtcam { struct list_head list; __be32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; refcount_t refcount; u32 id; bool is_keymask_set; u8 lookup; u8 direction; }; static const struct rhashtable_params prestera_acl_ruleset_ht_params = { .key_len = sizeof(struct prestera_acl_ruleset_ht_key), .key_offset = offsetof(struct prestera_acl_ruleset, ht_key), .head_offset = offsetof(struct prestera_acl_ruleset, ht_node), .automatic_shrinking = true, }; static const struct rhashtable_params prestera_acl_rule_ht_params = { .key_len = sizeof(unsigned long), .key_offset = offsetof(struct prestera_acl_rule, cookie), .head_offset = offsetof(struct prestera_acl_rule, ht_node), .automatic_shrinking = true, }; static const struct rhashtable_params __prestera_acl_rule_entry_ht_params = { .key_offset = offsetof(struct prestera_acl_rule_entry, key), .head_offset = offsetof(struct prestera_acl_rule_entry, ht_node), .key_len = sizeof(struct prestera_acl_rule_entry_key), .automatic_shrinking = true, }; int prestera_acl_chain_to_client(u32 chain_index, bool ingress, u32 *client) { static const u32 ingress_client_map[] = { PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_0, PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_1, PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_2 }; if (!ingress) { /* prestera supports only one chain on egress */ if (chain_index > 0) return -EINVAL; *client = PRESTERA_HW_COUNTER_CLIENT_EGRESS_LOOKUP; return 0; } if (chain_index >= ARRAY_SIZE(ingress_client_map)) return -EINVAL; *client = ingress_client_map[chain_index]; return 0; } static bool prestera_acl_chain_is_supported(u32 chain_index, bool ingress) { if (!ingress) /* prestera supports only one chain on egress */ return chain_index == 0; return (chain_index & ~PRESTERA_ACL_CHAIN_MASK) == 0; } static struct prestera_acl_ruleset * prestera_acl_ruleset_create(struct prestera_acl *acl, struct prestera_flow_block *block, u32 chain_index) { struct prestera_acl_ruleset *ruleset; u32 uid = 0; int err; if (!prestera_acl_chain_is_supported(chain_index, block->ingress)) return ERR_PTR(-EINVAL); ruleset = kzalloc(sizeof(*ruleset), GFP_KERNEL); if (!ruleset) return ERR_PTR(-ENOMEM); ruleset->acl = acl; ruleset->ingress = 
block->ingress; ruleset->ht_key.block = block; ruleset->ht_key.chain_index = chain_index; refcount_set(&ruleset->refcount, 1); err = rhashtable_init(&ruleset->rule_ht, &prestera_acl_rule_ht_params); if (err) goto err_rhashtable_init; err = idr_alloc_u32(&acl->uid, NULL, &uid, U8_MAX, GFP_KERNEL); if (err) goto err_ruleset_create; /* make pcl-id based on uid */ ruleset->pcl_id = PRESTERA_ACL_PCL_ID_MAKE((u8)uid, chain_index); ruleset->index = uid; ruleset->prio.min = UINT_MAX; ruleset->prio.max = 0; err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node, prestera_acl_ruleset_ht_params); if (err) goto err_ruleset_ht_insert; return ruleset; err_ruleset_ht_insert: idr_remove(&acl->uid, uid); err_ruleset_create: rhashtable_destroy(&ruleset->rule_ht); err_rhashtable_init: kfree(ruleset); return ERR_PTR(err); } int prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset, void *keymask) { ruleset->keymask = kmemdup(keymask, ACL_KEYMASK_SIZE, GFP_KERNEL); if (!ruleset->keymask) return -ENOMEM; return 0; } int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset) { struct prestera_acl_iface iface; u32 vtcam_id; int dir; int err; dir = ruleset->ingress ? PRESTERA_HW_VTCAM_DIR_INGRESS : PRESTERA_HW_VTCAM_DIR_EGRESS; if (ruleset->offload) return -EEXIST; err = prestera_acl_vtcam_id_get(ruleset->acl, ruleset->ht_key.chain_index, dir, ruleset->keymask, &vtcam_id); if (err) goto err_vtcam_create; if (ruleset->ht_key.chain_index) { /* for chain > 0, bind iface index to pcl-id to be able * to jump from any other ruleset to this one using the index. */ iface.index = ruleset->index; iface.type = PRESTERA_ACL_IFACE_TYPE_INDEX; err = prestera_hw_vtcam_iface_bind(ruleset->acl->sw, &iface, vtcam_id, ruleset->pcl_id); if (err) goto err_ruleset_bind; } ruleset->vtcam_id = vtcam_id; ruleset->offload = true; return 0; err_ruleset_bind: prestera_acl_vtcam_id_put(ruleset->acl, ruleset->vtcam_id); err_vtcam_create: return err; } static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset) { struct prestera_acl *acl = ruleset->acl; u8 uid = ruleset->pcl_id & PRESTERA_ACL_KEYMASK_PCL_ID_USER; int err; rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node, prestera_acl_ruleset_ht_params); if (ruleset->offload) { if (ruleset->ht_key.chain_index) { struct prestera_acl_iface iface = { .type = PRESTERA_ACL_IFACE_TYPE_INDEX, .index = ruleset->index }; err = prestera_hw_vtcam_iface_unbind(acl->sw, &iface, ruleset->vtcam_id); WARN_ON(err); } WARN_ON(prestera_acl_vtcam_id_put(acl, ruleset->vtcam_id)); } idr_remove(&acl->uid, uid); rhashtable_destroy(&ruleset->rule_ht); kfree(ruleset->keymask); kfree(ruleset); } static struct prestera_acl_ruleset * __prestera_acl_ruleset_lookup(struct prestera_acl *acl, struct prestera_flow_block *block, u32 chain_index) { struct prestera_acl_ruleset_ht_key ht_key; memset(&ht_key, 0, sizeof(ht_key)); ht_key.block = block; ht_key.chain_index = chain_index; return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key, prestera_acl_ruleset_ht_params); } struct prestera_acl_ruleset * prestera_acl_ruleset_lookup(struct prestera_acl *acl, struct prestera_flow_block *block, u32 chain_index) { struct prestera_acl_ruleset *ruleset; ruleset = __prestera_acl_ruleset_lookup(acl, block, chain_index); if (!ruleset) return ERR_PTR(-ENOENT); refcount_inc(&ruleset->refcount); return ruleset; } struct prestera_acl_ruleset * prestera_acl_ruleset_get(struct prestera_acl *acl, struct prestera_flow_block *block, u32 chain_index) { struct prestera_acl_ruleset 
*ruleset; ruleset = __prestera_acl_ruleset_lookup(acl, block, chain_index); if (ruleset) { refcount_inc(&ruleset->refcount); return ruleset; } return prestera_acl_ruleset_create(acl, block, chain_index); } void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset) { if (!refcount_dec_and_test(&ruleset->refcount)) return; prestera_acl_ruleset_destroy(ruleset); } int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset, struct prestera_port *port) { struct prestera_acl_iface iface = { .type = PRESTERA_ACL_IFACE_TYPE_PORT, .port = port }; return prestera_hw_vtcam_iface_bind(port->sw, &iface, ruleset->vtcam_id, ruleset->pcl_id); } int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset, struct prestera_port *port) { struct prestera_acl_iface iface = { .type = PRESTERA_ACL_IFACE_TYPE_PORT, .port = port }; return prestera_hw_vtcam_iface_unbind(port->sw, &iface, ruleset->vtcam_id); } static int prestera_acl_ruleset_block_bind(struct prestera_acl_ruleset *ruleset, struct prestera_flow_block *block) { struct prestera_flow_block_binding *binding; int err; block->ruleset_zero = ruleset; list_for_each_entry(binding, &block->binding_list, list) { err = prestera_acl_ruleset_bind(ruleset, binding->port); if (err) goto rollback; } return 0; rollback: list_for_each_entry_continue_reverse(binding, &block->binding_list, list) err = prestera_acl_ruleset_unbind(ruleset, binding->port); block->ruleset_zero = NULL; return err; } static void prestera_acl_ruleset_block_unbind(struct prestera_acl_ruleset *ruleset, struct prestera_flow_block *block) { struct prestera_flow_block_binding *binding; list_for_each_entry(binding, &block->binding_list, list) prestera_acl_ruleset_unbind(ruleset, binding->port); block->ruleset_zero = NULL; } static void prestera_acl_ruleset_prio_refresh(struct prestera_acl *acl, struct prestera_acl_ruleset *ruleset) { struct prestera_acl_rule *rule; ruleset->prio.min = UINT_MAX; ruleset->prio.max = 0; list_for_each_entry(rule, &acl->rules, list) { if (ruleset->ingress != rule->ruleset->ingress) continue; if (ruleset->ht_key.chain_index != rule->chain_index) continue; ruleset->prio.min = min(ruleset->prio.min, rule->priority); ruleset->prio.max = max(ruleset->prio.max, rule->priority); } } void prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule, u16 pcl_id) { struct prestera_acl_match *r_match = &rule->re_key.match; __be16 pcl_id_mask = htons(PRESTERA_ACL_KEYMASK_PCL_ID); __be16 pcl_id_key = htons(pcl_id); rule_match_set(r_match->key, PCL_ID, pcl_id_key); rule_match_set(r_match->mask, PCL_ID, pcl_id_mask); } struct prestera_acl_rule * prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset, unsigned long cookie) { return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie, prestera_acl_rule_ht_params); } u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset) { return ruleset->index; } void prestera_acl_ruleset_prio_get(struct prestera_acl_ruleset *ruleset, u32 *prio_min, u32 *prio_max) { *prio_min = ruleset->prio.min; *prio_max = ruleset->prio.max; } bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset) { return ruleset->offload; } struct prestera_acl_rule * prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset, unsigned long cookie, u32 chain_index) { struct prestera_acl_rule *rule; rule = kzalloc(sizeof(*rule), GFP_KERNEL); if (!rule) return ERR_PTR(-ENOMEM); rule->ruleset = ruleset; rule->cookie = cookie; rule->chain_index = chain_index; refcount_inc(&ruleset->refcount); return rule; } 
void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule, u32 priority) { rule->priority = priority; } void prestera_acl_rule_destroy(struct prestera_acl_rule *rule) { if (rule->jump_ruleset) /* release ruleset kept by jump action */ prestera_acl_ruleset_put(rule->jump_ruleset); prestera_acl_ruleset_put(rule->ruleset); kfree(rule); } static void prestera_acl_ruleset_prio_update(struct prestera_acl_ruleset *ruleset, u32 prio) { ruleset->prio.min = min(ruleset->prio.min, prio); ruleset->prio.max = max(ruleset->prio.max, prio); } int prestera_acl_rule_add(struct prestera_switch *sw, struct prestera_acl_rule *rule) { int err; struct prestera_acl_ruleset *ruleset = rule->ruleset; struct prestera_flow_block *block = ruleset->ht_key.block; /* try to add rule to hash table first */ err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node, prestera_acl_rule_ht_params); if (err) goto err_ht_insert; prestera_acl_rule_keymask_pcl_id_set(rule, ruleset->pcl_id); rule->re_arg.vtcam_id = ruleset->vtcam_id; rule->re_key.prio = rule->priority; rule->re = prestera_acl_rule_entry_find(sw->acl, &rule->re_key); err = WARN_ON(rule->re) ? -EEXIST : 0; if (err) goto err_rule_add; rule->re = prestera_acl_rule_entry_create(sw->acl, &rule->re_key, &rule->re_arg); err = !rule->re ? -EINVAL : 0; if (err) goto err_rule_add; /* bind the block (all ports) to chain index 0, rest of * the chains are bound to goto action */ if (!ruleset->ht_key.chain_index && !ruleset->rule_count) { err = prestera_acl_ruleset_block_bind(ruleset, block); if (err) goto err_acl_block_bind; } list_add_tail(&rule->list, &sw->acl->rules); ruleset->rule_count++; prestera_acl_ruleset_prio_update(ruleset, rule->priority); return 0; err_acl_block_bind: prestera_acl_rule_entry_destroy(sw->acl, rule->re); err_rule_add: rule->re = NULL; rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node, prestera_acl_rule_ht_params); err_ht_insert: return err; } void prestera_acl_rule_del(struct prestera_switch *sw, struct prestera_acl_rule *rule) { struct prestera_acl_ruleset *ruleset = rule->ruleset; struct prestera_flow_block *block = ruleset->ht_key.block; rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node, prestera_acl_rule_ht_params); ruleset->rule_count--; list_del(&rule->list); prestera_acl_rule_entry_destroy(sw->acl, rule->re); prestera_acl_ruleset_prio_refresh(sw->acl, ruleset); /* unbind block (all ports) */ if (!ruleset->ht_key.chain_index && !ruleset->rule_count) prestera_acl_ruleset_block_unbind(ruleset, block); } int prestera_acl_rule_get_stats(struct prestera_acl *acl, struct prestera_acl_rule *rule, u64 *packets, u64 *bytes, u64 *last_use) { u64 current_packets; u64 current_bytes; int err; err = prestera_counter_stats_get(acl->sw->counter, rule->re->counter.block, rule->re->counter.id, &current_packets, &current_bytes); if (err) return err; *packets = current_packets; *bytes = current_bytes; *last_use = jiffies; return 0; } struct prestera_acl_rule_entry * prestera_acl_rule_entry_find(struct prestera_acl *acl, struct prestera_acl_rule_entry_key *key) { return rhashtable_lookup_fast(&acl->acl_rule_entry_ht, key, __prestera_acl_rule_entry_ht_params); } static int __prestera_acl_rule_entry2hw_del(struct prestera_switch *sw, struct prestera_acl_rule_entry *e) { return prestera_hw_vtcam_rule_del(sw, e->vtcam_id, e->hw_id); } static int __prestera_acl_rule_entry2hw_add(struct prestera_switch *sw, struct prestera_acl_rule_entry *e) { struct prestera_acl_hw_action_info act_hw[PRESTERA_ACL_RULE_ACTION_MAX]; int act_num; memset(&act_hw, 
0, sizeof(act_hw)); act_num = 0; /* accept */ if (e->accept.valid) { act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_ACCEPT; act_num++; } /* drop */ if (e->drop.valid) { act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_DROP; act_num++; } /* trap */ if (e->trap.valid) { act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_TRAP; act_num++; } /* police */ if (e->police.valid) { act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_POLICE; act_hw[act_num].police = e->police.i; act_num++; } /* jump */ if (e->jump.valid) { act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_JUMP; act_hw[act_num].jump = e->jump.i; act_num++; } /* counter */ if (e->counter.block) { act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_COUNT; act_hw[act_num].count.id = e->counter.id; act_num++; } return prestera_hw_vtcam_rule_add(sw, e->vtcam_id, e->key.prio, e->key.match.key, e->key.match.mask, act_hw, act_num, &e->hw_id); } static void __prestera_acl_rule_entry_act_destruct(struct prestera_switch *sw, struct prestera_acl_rule_entry *e) { /* counter */ prestera_counter_put(sw->counter, e->counter.block, e->counter.id); /* police */ if (e->police.valid) prestera_hw_policer_release(sw, e->police.i.id); } void prestera_acl_rule_entry_destroy(struct prestera_acl *acl, struct prestera_acl_rule_entry *e) { int ret; rhashtable_remove_fast(&acl->acl_rule_entry_ht, &e->ht_node, __prestera_acl_rule_entry_ht_params); ret = __prestera_acl_rule_entry2hw_del(acl->sw, e); WARN_ON(ret && ret != -ENODEV); __prestera_acl_rule_entry_act_destruct(acl->sw, e); kfree(e); } static int __prestera_acl_rule_entry_act_construct(struct prestera_switch *sw, struct prestera_acl_rule_entry *e, struct prestera_acl_rule_entry_arg *arg) { int err; /* accept */ e->accept.valid = arg->accept.valid; /* drop */ e->drop.valid = arg->drop.valid; /* trap */ e->trap.valid = arg->trap.valid; /* jump */ e->jump.valid = arg->jump.valid; e->jump.i = arg->jump.i; /* police */ if (arg->police.valid) { u8 type = arg->police.ingress ? 
PRESTERA_POLICER_TYPE_INGRESS : PRESTERA_POLICER_TYPE_EGRESS; err = prestera_hw_policer_create(sw, type, &e->police.i.id); if (err) goto err_out; err = prestera_hw_policer_sr_tcm_set(sw, e->police.i.id, arg->police.rate, arg->police.burst); if (err) { prestera_hw_policer_release(sw, e->police.i.id); goto err_out; } e->police.valid = arg->police.valid; } /* counter */ if (arg->count.valid) { err = prestera_counter_get(sw->counter, arg->count.client, &e->counter.block, &e->counter.id); if (err) goto err_out; } return 0; err_out: __prestera_acl_rule_entry_act_destruct(sw, e); return -EINVAL; } struct prestera_acl_rule_entry * prestera_acl_rule_entry_create(struct prestera_acl *acl, struct prestera_acl_rule_entry_key *key, struct prestera_acl_rule_entry_arg *arg) { struct prestera_acl_rule_entry *e; int err; e = kzalloc(sizeof(*e), GFP_KERNEL); if (!e) goto err_kzalloc; memcpy(&e->key, key, sizeof(*key)); e->vtcam_id = arg->vtcam_id; err = __prestera_acl_rule_entry_act_construct(acl->sw, e, arg); if (err) goto err_act_construct; err = __prestera_acl_rule_entry2hw_add(acl->sw, e); if (err) goto err_hw_add; err = rhashtable_insert_fast(&acl->acl_rule_entry_ht, &e->ht_node, __prestera_acl_rule_entry_ht_params); if (err) goto err_ht_insert; return e; err_ht_insert: WARN_ON(__prestera_acl_rule_entry2hw_del(acl->sw, e)); err_hw_add: __prestera_acl_rule_entry_act_destruct(acl->sw, e); err_act_construct: kfree(e); err_kzalloc: return NULL; } static int __prestera_acl_vtcam_id_try_fit(struct prestera_acl *acl, u8 lookup, void *keymask, u32 *vtcam_id) { struct prestera_acl_vtcam *vtcam; int i; list_for_each_entry(vtcam, &acl->vtcam_list, list) { if (lookup != vtcam->lookup) continue; if (!keymask && !vtcam->is_keymask_set) goto vtcam_found; if (!(keymask && vtcam->is_keymask_set)) continue; /* try to fit with vtcam keymask */ for (i = 0; i < __PRESTERA_ACL_RULE_MATCH_TYPE_MAX; i++) { __be32 __keymask = ((__be32 *)keymask)[i]; if (!__keymask) /* vtcam keymask in not interested */ continue; if (__keymask & ~vtcam->keymask[i]) /* keymask does not fit the vtcam keymask */ break; } if (i == __PRESTERA_ACL_RULE_MATCH_TYPE_MAX) /* keymask fits vtcam keymask, return it */ goto vtcam_found; } /* nothing is found */ return -ENOENT; vtcam_found: refcount_inc(&vtcam->refcount); *vtcam_id = vtcam->id; return 0; } int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup, u8 dir, void *keymask, u32 *vtcam_id) { struct prestera_acl_vtcam *vtcam; u32 new_vtcam_id; int err; /* find the vtcam that suits keymask. 
We do not expect to have * a big number of vtcams, so, the list type for vtcam list is * fine for now */ list_for_each_entry(vtcam, &acl->vtcam_list, list) { if (lookup != vtcam->lookup || dir != vtcam->direction) continue; if (!keymask && !vtcam->is_keymask_set) { refcount_inc(&vtcam->refcount); goto vtcam_found; } if (keymask && vtcam->is_keymask_set && !memcmp(keymask, vtcam->keymask, sizeof(vtcam->keymask))) { refcount_inc(&vtcam->refcount); goto vtcam_found; } } /* vtcam not found, try to create new one */ vtcam = kzalloc(sizeof(*vtcam), GFP_KERNEL); if (!vtcam) return -ENOMEM; err = prestera_hw_vtcam_create(acl->sw, lookup, keymask, &new_vtcam_id, dir); if (err) { kfree(vtcam); /* cannot create new, try to fit into existing vtcam */ if (__prestera_acl_vtcam_id_try_fit(acl, lookup, keymask, &new_vtcam_id)) return err; *vtcam_id = new_vtcam_id; return 0; } vtcam->direction = dir; vtcam->id = new_vtcam_id; vtcam->lookup = lookup; if (keymask) { memcpy(vtcam->keymask, keymask, sizeof(vtcam->keymask)); vtcam->is_keymask_set = true; } refcount_set(&vtcam->refcount, 1); list_add_rcu(&vtcam->list, &acl->vtcam_list); vtcam_found: *vtcam_id = vtcam->id; return 0; } int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id) { struct prestera_acl_vtcam *vtcam; int err; list_for_each_entry(vtcam, &acl->vtcam_list, list) { if (vtcam_id != vtcam->id) continue; if (!refcount_dec_and_test(&vtcam->refcount)) return 0; err = prestera_hw_vtcam_destroy(acl->sw, vtcam->id); if (err && err != -ENODEV) { refcount_set(&vtcam->refcount, 1); return err; } list_del(&vtcam->list); kfree(vtcam); return 0; } return -ENOENT; } int prestera_acl_init(struct prestera_switch *sw) { struct prestera_acl *acl; int err; acl = kzalloc(sizeof(*acl), GFP_KERNEL); if (!acl) return -ENOMEM; acl->sw = sw; INIT_LIST_HEAD(&acl->rules); INIT_LIST_HEAD(&acl->vtcam_list); idr_init(&acl->uid); err = rhashtable_init(&acl->acl_rule_entry_ht, &__prestera_acl_rule_entry_ht_params); if (err) goto err_acl_rule_entry_ht_init; err = rhashtable_init(&acl->ruleset_ht, &prestera_acl_ruleset_ht_params); if (err) goto err_ruleset_ht_init; sw->acl = acl; return 0; err_ruleset_ht_init: rhashtable_destroy(&acl->acl_rule_entry_ht); err_acl_rule_entry_ht_init: kfree(acl); return err; } void prestera_acl_fini(struct prestera_switch *sw) { struct prestera_acl *acl = sw->acl; WARN_ON(!idr_is_empty(&acl->uid)); idr_destroy(&acl->uid); WARN_ON(!list_empty(&acl->vtcam_list)); WARN_ON(!list_empty(&acl->rules)); rhashtable_destroy(&acl->ruleset_ht); rhashtable_destroy(&acl->acl_rule_entry_ht); kfree(acl); }
linux-master
drivers/net/ethernet/marvell/prestera/prestera_acl.c
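The ACL core above exposes a reference-counted ruleset object: prestera_acl_ruleset_get() looks up or creates a ruleset and takes a reference, prestera_acl_ruleset_offload() lazily allocates the backing vTCAM, and prestera_acl_ruleset_put() drops the reference and destroys the ruleset once it reaches zero. The sketch below shows how a caller might pair these helpers for chain 0; it is a hypothetical usage example, not driver code — the function name, the fixed chain index 0 and the choice to keep holding the reference while the port stays bound are assumptions made for the illustration.

/* Hypothetical usage sketch (not driver code): pairing the reference-counted
 * ruleset helpers exported by prestera_acl.c.
 */
static int prestera_acl_example_bind_port(struct prestera_switch *sw,
					  struct prestera_flow_block *block,
					  struct prestera_port *port)
{
	struct prestera_acl_ruleset *ruleset;
	int err;

	/* Takes a reference; creates the ruleset on first use (chain 0 here). */
	ruleset = prestera_acl_ruleset_get(sw->acl, block, 0);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	/* Allocate the backing vTCAM once, using whatever keymask is set. */
	if (!prestera_acl_ruleset_is_offload(ruleset)) {
		err = prestera_acl_ruleset_offload(ruleset);
		if (err)
			goto err_put;
	}

	err = prestera_acl_ruleset_bind(ruleset, port);
	if (err)
		goto err_put;

	/* Keep the reference for as long as the port stays bound. */
	return 0;

err_put:
	/* Drops the reference; the ruleset is freed when it hits zero. */
	prestera_acl_ruleset_put(ruleset);
	return err;
}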
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /* Copyright (c) 2020 Marvell International Ltd. All rights reserved */ #include "prestera.h" #include "prestera_acl.h" #include "prestera_flow.h" #include "prestera_flower.h" #include "prestera_matchall.h" struct prestera_flower_template { struct prestera_acl_ruleset *ruleset; struct list_head list; u32 chain_index; }; static void prestera_flower_template_free(struct prestera_flower_template *template) { prestera_acl_ruleset_put(template->ruleset); list_del(&template->list); kfree(template); } void prestera_flower_template_cleanup(struct prestera_flow_block *block) { struct prestera_flower_template *template, *tmp; /* put the reference to all rulesets kept in tmpl create */ list_for_each_entry_safe(template, tmp, &block->template_list, list) prestera_flower_template_free(template); } static int prestera_flower_parse_goto_action(struct prestera_flow_block *block, struct prestera_acl_rule *rule, u32 chain_index, const struct flow_action_entry *act) { struct prestera_acl_ruleset *ruleset; if (act->chain_index <= chain_index) /* we can jump only forward */ return -EINVAL; if (rule->re_arg.jump.valid) return -EEXIST; ruleset = prestera_acl_ruleset_get(block->sw->acl, block, act->chain_index); if (IS_ERR(ruleset)) return PTR_ERR(ruleset); rule->re_arg.jump.valid = 1; rule->re_arg.jump.i.index = prestera_acl_ruleset_index_get(ruleset); rule->jump_ruleset = ruleset; return 0; } static int prestera_flower_parse_actions(struct prestera_flow_block *block, struct prestera_acl_rule *rule, struct flow_action *flow_action, u32 chain_index, struct netlink_ext_ack *extack) { const struct flow_action_entry *act; int err, i; /* whole struct (rule->re_arg) must be initialized with 0 */ if (!flow_action_has_entries(flow_action)) return 0; if (!flow_action_mixed_hw_stats_check(flow_action, extack)) return -EOPNOTSUPP; act = flow_action_first_entry_get(flow_action); if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) { /* Nothing to do */ } else if (act->hw_stats & FLOW_ACTION_HW_STATS_DELAYED) { /* setup counter first */ rule->re_arg.count.valid = true; err = prestera_acl_chain_to_client(chain_index, block->ingress, &rule->re_arg.count.client); if (err) return err; } else { NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type"); return -EOPNOTSUPP; } flow_action_for_each(i, act, flow_action) { switch (act->id) { case FLOW_ACTION_ACCEPT: if (rule->re_arg.accept.valid) return -EEXIST; rule->re_arg.accept.valid = 1; break; case FLOW_ACTION_DROP: if (rule->re_arg.drop.valid) return -EEXIST; rule->re_arg.drop.valid = 1; break; case FLOW_ACTION_TRAP: if (rule->re_arg.trap.valid) return -EEXIST; rule->re_arg.trap.valid = 1; break; case FLOW_ACTION_POLICE: if (rule->re_arg.police.valid) return -EEXIST; rule->re_arg.police.valid = 1; rule->re_arg.police.rate = act->police.rate_bytes_ps; rule->re_arg.police.burst = act->police.burst; rule->re_arg.police.ingress = block->ingress; break; case FLOW_ACTION_GOTO: err = prestera_flower_parse_goto_action(block, rule, chain_index, act); if (err) return err; break; default: NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); pr_err("Unsupported action\n"); return -EOPNOTSUPP; } } return 0; } static int prestera_flower_parse_meta(struct prestera_acl_rule *rule, struct flow_cls_offload *f, struct prestera_flow_block *block) { struct flow_rule *f_rule = flow_cls_offload_flow_rule(f); struct prestera_acl_match *r_match = &rule->re_key.match; struct prestera_port *port; struct net_device *ingress_dev; struct flow_match_meta 
match; __be16 key, mask; flow_rule_match_meta(f_rule, &match); if (match.mask->l2_miss) { NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on \"l2_miss\""); return -EOPNOTSUPP; } if (match.mask->ingress_ifindex != 0xFFFFFFFF) { NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask"); return -EINVAL; } ingress_dev = __dev_get_by_index(block->net, match.key->ingress_ifindex); if (!ingress_dev) { NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on"); return -EINVAL; } if (!prestera_netdev_check(ingress_dev)) { NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on switchdev ingress port"); return -EINVAL; } port = netdev_priv(ingress_dev); mask = htons(0x1FFF << 3); key = htons(port->hw_id << 3); rule_match_set(r_match->key, SYS_PORT, key); rule_match_set(r_match->mask, SYS_PORT, mask); mask = htons(0x3FF); key = htons(port->dev_id); rule_match_set(r_match->key, SYS_DEV, key); rule_match_set(r_match->mask, SYS_DEV, mask); return 0; } static int prestera_flower_parse(struct prestera_flow_block *block, struct prestera_acl_rule *rule, struct flow_cls_offload *f) { struct flow_rule *f_rule = flow_cls_offload_flow_rule(f); struct flow_dissector *dissector = f_rule->match.dissector; struct prestera_acl_match *r_match = &rule->re_key.match; __be16 n_proto_mask = 0; __be16 n_proto_key = 0; u16 addr_type = 0; u8 ip_proto = 0; int err; if (dissector->used_keys & ~(BIT_ULL(FLOW_DISSECTOR_KEY_META) | BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) | BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | BIT_ULL(FLOW_DISSECTOR_KEY_PORTS_RANGE) | BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) { NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key"); return -EOPNOTSUPP; } prestera_acl_rule_priority_set(rule, f->common.prio); if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_META)) { err = prestera_flower_parse_meta(rule, f, block); if (err) return err; } if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_CONTROL)) { struct flow_match_control match; flow_rule_match_control(f_rule, &match); addr_type = match.key->addr_type; } if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; flow_rule_match_basic(f_rule, &match); n_proto_key = match.key->n_proto; n_proto_mask = match.mask->n_proto; if (ntohs(match.key->n_proto) == ETH_P_ALL) { n_proto_key = 0; n_proto_mask = 0; } rule_match_set(r_match->key, ETH_TYPE, n_proto_key); rule_match_set(r_match->mask, ETH_TYPE, n_proto_mask); rule_match_set(r_match->key, IP_PROTO, match.key->ip_proto); rule_match_set(r_match->mask, IP_PROTO, match.mask->ip_proto); ip_proto = match.key->ip_proto; } if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { struct flow_match_eth_addrs match; flow_rule_match_eth_addrs(f_rule, &match); /* DA key, mask */ rule_match_set_n(r_match->key, ETH_DMAC_0, &match.key->dst[0], 4); rule_match_set_n(r_match->key, ETH_DMAC_1, &match.key->dst[4], 2); rule_match_set_n(r_match->mask, ETH_DMAC_0, &match.mask->dst[0], 4); rule_match_set_n(r_match->mask, ETH_DMAC_1, &match.mask->dst[4], 2); /* SA key, mask */ rule_match_set_n(r_match->key, ETH_SMAC_0, &match.key->src[0], 4); rule_match_set_n(r_match->key, ETH_SMAC_1, &match.key->src[4], 2); rule_match_set_n(r_match->mask, ETH_SMAC_0, &match.mask->src[0], 4); rule_match_set_n(r_match->mask, ETH_SMAC_1, &match.mask->src[4], 2); } if (addr_type == 
FLOW_DISSECTOR_KEY_IPV4_ADDRS) { struct flow_match_ipv4_addrs match; flow_rule_match_ipv4_addrs(f_rule, &match); rule_match_set(r_match->key, IP_SRC, match.key->src); rule_match_set(r_match->mask, IP_SRC, match.mask->src); rule_match_set(r_match->key, IP_DST, match.key->dst); rule_match_set(r_match->mask, IP_DST, match.mask->dst); } if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) { struct flow_match_ports match; if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { NL_SET_ERR_MSG_MOD (f->common.extack, "Only UDP and TCP keys are supported"); return -EINVAL; } flow_rule_match_ports(f_rule, &match); rule_match_set(r_match->key, L4_PORT_SRC, match.key->src); rule_match_set(r_match->mask, L4_PORT_SRC, match.mask->src); rule_match_set(r_match->key, L4_PORT_DST, match.key->dst); rule_match_set(r_match->mask, L4_PORT_DST, match.mask->dst); } if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS_RANGE)) { struct flow_match_ports_range match; __be32 tp_key, tp_mask; flow_rule_match_ports_range(f_rule, &match); /* src port range (min, max) */ tp_key = htonl(ntohs(match.key->tp_min.src) | (ntohs(match.key->tp_max.src) << 16)); tp_mask = htonl(ntohs(match.mask->tp_min.src) | (ntohs(match.mask->tp_max.src) << 16)); rule_match_set(r_match->key, L4_PORT_RANGE_SRC, tp_key); rule_match_set(r_match->mask, L4_PORT_RANGE_SRC, tp_mask); /* dst port range (min, max) */ tp_key = htonl(ntohs(match.key->tp_min.dst) | (ntohs(match.key->tp_max.dst) << 16)); tp_mask = htonl(ntohs(match.mask->tp_min.dst) | (ntohs(match.mask->tp_max.dst) << 16)); rule_match_set(r_match->key, L4_PORT_RANGE_DST, tp_key); rule_match_set(r_match->mask, L4_PORT_RANGE_DST, tp_mask); } if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) { struct flow_match_vlan match; flow_rule_match_vlan(f_rule, &match); if (match.mask->vlan_id != 0) { __be16 key = cpu_to_be16(match.key->vlan_id); __be16 mask = cpu_to_be16(match.mask->vlan_id); rule_match_set(r_match->key, VLAN_ID, key); rule_match_set(r_match->mask, VLAN_ID, mask); } rule_match_set(r_match->key, VLAN_TPID, match.key->vlan_tpid); rule_match_set(r_match->mask, VLAN_TPID, match.mask->vlan_tpid); } if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) { struct flow_match_icmp match; flow_rule_match_icmp(f_rule, &match); rule_match_set(r_match->key, ICMP_TYPE, match.key->type); rule_match_set(r_match->mask, ICMP_TYPE, match.mask->type); rule_match_set(r_match->key, ICMP_CODE, match.key->code); rule_match_set(r_match->mask, ICMP_CODE, match.mask->code); } return prestera_flower_parse_actions(block, rule, &f->rule->action, f->common.chain_index, f->common.extack); } static int prestera_flower_prio_check(struct prestera_flow_block *block, struct flow_cls_offload *f) { u32 mall_prio_min; u32 mall_prio_max; int err; err = prestera_mall_prio_get(block, &mall_prio_min, &mall_prio_max); if (err == -ENOENT) /* No matchall filters installed on this chain. 
*/ return 0; if (err) { NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities"); return err; } if (f->common.prio <= mall_prio_max && block->ingress) { NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing matchall rules"); return -EOPNOTSUPP; } if (f->common.prio >= mall_prio_min && !block->ingress) { NL_SET_ERR_MSG(f->common.extack, "Failed to add behind of existing matchall rules"); return -EOPNOTSUPP; } return 0; } int prestera_flower_prio_get(struct prestera_flow_block *block, u32 chain_index, u32 *prio_min, u32 *prio_max) { struct prestera_acl_ruleset *ruleset; ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block, chain_index); if (IS_ERR(ruleset)) return PTR_ERR(ruleset); prestera_acl_ruleset_prio_get(ruleset, prio_min, prio_max); return 0; } int prestera_flower_replace(struct prestera_flow_block *block, struct flow_cls_offload *f) { struct prestera_acl_ruleset *ruleset; struct prestera_acl *acl = block->sw->acl; struct prestera_acl_rule *rule; int err; err = prestera_flower_prio_check(block, f); if (err) return err; ruleset = prestera_acl_ruleset_get(acl, block, f->common.chain_index); if (IS_ERR(ruleset)) return PTR_ERR(ruleset); /* increments the ruleset reference */ rule = prestera_acl_rule_create(ruleset, f->cookie, f->common.chain_index); if (IS_ERR(rule)) { err = PTR_ERR(rule); goto err_rule_create; } err = prestera_flower_parse(block, rule, f); if (err) goto err_rule_add; if (!prestera_acl_ruleset_is_offload(ruleset)) { err = prestera_acl_ruleset_offload(ruleset); if (err) goto err_ruleset_offload; } err = prestera_acl_rule_add(block->sw, rule); if (err) goto err_rule_add; prestera_acl_ruleset_put(ruleset); return 0; err_ruleset_offload: err_rule_add: prestera_acl_rule_destroy(rule); err_rule_create: prestera_acl_ruleset_put(ruleset); return err; } void prestera_flower_destroy(struct prestera_flow_block *block, struct flow_cls_offload *f) { struct prestera_acl_ruleset *ruleset; struct prestera_acl_rule *rule; ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block, f->common.chain_index); if (IS_ERR(ruleset)) return; rule = prestera_acl_rule_lookup(ruleset, f->cookie); if (rule) { prestera_acl_rule_del(block->sw, rule); prestera_acl_rule_destroy(rule); } prestera_acl_ruleset_put(ruleset); } int prestera_flower_tmplt_create(struct prestera_flow_block *block, struct flow_cls_offload *f) { struct prestera_flower_template *template; struct prestera_acl_ruleset *ruleset; struct prestera_acl_rule rule; int err; memset(&rule, 0, sizeof(rule)); err = prestera_flower_parse(block, &rule, f); if (err) return err; template = kmalloc(sizeof(*template), GFP_KERNEL); if (!template) { err = -ENOMEM; goto err_malloc; } prestera_acl_rule_keymask_pcl_id_set(&rule, 0); ruleset = prestera_acl_ruleset_get(block->sw->acl, block, f->common.chain_index); if (IS_ERR_OR_NULL(ruleset)) { err = -EINVAL; goto err_ruleset_get; } /* preserve keymask/template to this ruleset */ err = prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask); if (err) goto err_ruleset_keymask_set; /* skip error, as it is not possible to reject template operation, * so, keep the reference to the ruleset for rules to be added * to that ruleset later. In case of offload fail, the ruleset * will be offloaded again during adding a new rule. Also, * unlikly possble that ruleset is already offloaded at this staage. 
*/ prestera_acl_ruleset_offload(ruleset); /* keep the reference to the ruleset */ template->ruleset = ruleset; template->chain_index = f->common.chain_index; list_add_rcu(&template->list, &block->template_list); return 0; err_ruleset_keymask_set: prestera_acl_ruleset_put(ruleset); err_ruleset_get: kfree(template); err_malloc: NL_SET_ERR_MSG_MOD(f->common.extack, "Create chain template failed"); return err; } void prestera_flower_tmplt_destroy(struct prestera_flow_block *block, struct flow_cls_offload *f) { struct prestera_flower_template *template, *tmp; list_for_each_entry_safe(template, tmp, &block->template_list, list) if (template->chain_index == f->common.chain_index) { /* put the reference to the ruleset kept in create */ prestera_flower_template_free(template); return; } } int prestera_flower_stats(struct prestera_flow_block *block, struct flow_cls_offload *f) { struct prestera_acl_ruleset *ruleset; struct prestera_acl_rule *rule; u64 packets; u64 lastuse; u64 bytes; int err; ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block, f->common.chain_index); if (IS_ERR(ruleset)) return PTR_ERR(ruleset); rule = prestera_acl_rule_lookup(ruleset, f->cookie); if (!rule) { err = -EINVAL; goto err_rule_get_stats; } err = prestera_acl_rule_get_stats(block->sw->acl, rule, &packets, &bytes, &lastuse); if (err) goto err_rule_get_stats; flow_stats_update(&f->stats, bytes, packets, 0, lastuse, FLOW_ACTION_HW_STATS_DELAYED); err_rule_get_stats: prestera_acl_ruleset_put(ruleset); return err; }
linux-master
drivers/net/ethernet/marvell/prestera/prestera_flower.c
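The L4 port-range handling in prestera_flower_parse() above packs each (min, max) pair into a single 32-bit match word: the 16-bit minimum sits in the low half, the maximum in the high half, and the result is stored in network byte order via htonl(). The stand-alone user-space sketch below replays only that packing step so the layout can be checked in isolation; pack_port_range() and the sample port numbers are illustrative and are not driver symbols.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative helper (not a driver function): combine an L4 port range,
 * given as big-endian min/max values, into the 32-bit key layout used by
 * the flower offload above: min in bits 0-15, max in bits 16-31, with the
 * packed word returned in network byte order.
 */
static uint32_t pack_port_range(uint16_t tp_min_be, uint16_t tp_max_be)
{
	return htonl(ntohs(tp_min_be) | (ntohs(tp_max_be) << 16));
}

int main(void)
{
	/* Arbitrary example range: ports 1000-2000. */
	uint16_t min_be = htons(1000), max_be = htons(2000);
	uint32_t key_be = pack_port_range(min_be, max_be);

	/* Expect 0x07d003e8: 2000 (0x07d0) in the high half, 1000 (0x03e8) in the low half. */
	printf("packed key (host order): 0x%08x\n", ntohl(key_be));
	return 0;
}

The driver applies the same layout to both the key and the mask, and to both the L4_PORT_RANGE_SRC and L4_PORT_RANGE_DST fields.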
// SPDX-License-Identifier: GPL-2.0 /* * Marvell PP2.2 TAI support * * Note: * Do NOT use the event capture support. * Do Not even set the MPP muxes to allow PTP_EVENT_REQ to be used. * It will disrupt the operation of this driver, and there is nothing * that this driver can do to prevent that. Even using PTP_EVENT_REQ * as an output will be seen as a trigger input, which can't be masked. * When ever a trigger input is seen, the action in the TCFCR0_TCF * field will be performed - whether it is a set, increment, decrement * read, or frequency update. * * Other notes (useful, not specified in the documentation): * - PTP_PULSE_OUT (PTP_EVENT_REQ MPP) * It looks like the hardware can't generate a pulse at nsec=0. (The * output doesn't trigger if the nsec field is zero.) * Note: when configured as an output via the register at 0xfX441120, * the input is still very much alive, and will trigger the current TCF * function. * - PTP_CLK_OUT (PTP_TRIG_GEN MPP) * This generates a "PPS" signal determined by the CCC registers. It * seems this is not aligned to the TOD counter in any way (it may be * initially, but if you specify a non-round second interval, it won't, * and you can't easily get it back.) * - PTP_PCLK_OUT * This generates a 50% duty cycle clock based on the TOD counter, and * seems it can be set to any period of 1ns resolution. It is probably * limited by the TOD step size. Its period is defined by the PCLK_CCC * registers. Again, its alignment to the second is questionable. * * Consequently, we support none of these. */ #include <linux/io.h> #include <linux/ptp_clock_kernel.h> #include <linux/slab.h> #include "mvpp2.h" #define CR0_SW_NRESET BIT(0) #define TCFCR0_PHASE_UPDATE_ENABLE BIT(8) #define TCFCR0_TCF_MASK (7 << 2) #define TCFCR0_TCF_UPDATE (0 << 2) #define TCFCR0_TCF_FREQUPDATE (1 << 2) #define TCFCR0_TCF_INCREMENT (2 << 2) #define TCFCR0_TCF_DECREMENT (3 << 2) #define TCFCR0_TCF_CAPTURE (4 << 2) #define TCFCR0_TCF_NOP (7 << 2) #define TCFCR0_TCF_TRIGGER BIT(0) #define TCSR_CAPTURE_1_VALID BIT(1) #define TCSR_CAPTURE_0_VALID BIT(0) struct mvpp2_tai { struct ptp_clock_info caps; struct ptp_clock *ptp_clock; void __iomem *base; spinlock_t lock; u64 period; // nanosecond period in 32.32 fixed point /* This timestamp is updated every two seconds */ struct timespec64 stamp; }; static void mvpp2_tai_modify(void __iomem *reg, u32 mask, u32 set) { u32 val; val = readl_relaxed(reg) & ~mask; val |= set & mask; writel(val, reg); } static void mvpp2_tai_write(u32 val, void __iomem *reg) { writel_relaxed(val & 0xffff, reg); } static u32 mvpp2_tai_read(void __iomem *reg) { return readl_relaxed(reg) & 0xffff; } static struct mvpp2_tai *ptp_to_tai(struct ptp_clock_info *ptp) { return container_of(ptp, struct mvpp2_tai, caps); } static void mvpp22_tai_read_ts(struct timespec64 *ts, void __iomem *base) { ts->tv_sec = (u64)mvpp2_tai_read(base + 0) << 32 | mvpp2_tai_read(base + 4) << 16 | mvpp2_tai_read(base + 8); ts->tv_nsec = mvpp2_tai_read(base + 12) << 16 | mvpp2_tai_read(base + 16); /* Read and discard fractional part */ readl_relaxed(base + 20); readl_relaxed(base + 24); } static void mvpp2_tai_write_tlv(const struct timespec64 *ts, u32 frac, void __iomem *base) { mvpp2_tai_write(ts->tv_sec >> 32, base + MVPP22_TAI_TLV_SEC_HIGH); mvpp2_tai_write(ts->tv_sec >> 16, base + MVPP22_TAI_TLV_SEC_MED); mvpp2_tai_write(ts->tv_sec, base + MVPP22_TAI_TLV_SEC_LOW); mvpp2_tai_write(ts->tv_nsec >> 16, base + MVPP22_TAI_TLV_NANO_HIGH); mvpp2_tai_write(ts->tv_nsec, base + MVPP22_TAI_TLV_NANO_LOW); 
mvpp2_tai_write(frac >> 16, base + MVPP22_TAI_TLV_FRAC_HIGH); mvpp2_tai_write(frac, base + MVPP22_TAI_TLV_FRAC_LOW); } static void mvpp2_tai_op(u32 op, void __iomem *base) { /* Trigger the operation. Note that an external unmaskable * event on PTP_EVENT_REQ will also trigger this action. */ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, TCFCR0_TCF_MASK | TCFCR0_TCF_TRIGGER, op | TCFCR0_TCF_TRIGGER); mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, TCFCR0_TCF_MASK, TCFCR0_TCF_NOP); } /* The adjustment has a range of +0.5ns to -0.5ns in 2^32 steps, so has units * of 2^-32 ns. * * units(s) = 1 / (2^32 * 10^9) * fractional = abs_scaled_ppm / (2^16 * 10^6) * * What we want to achieve: * freq_adjusted = freq_nominal * (1 + fractional) * freq_delta = freq_adjusted - freq_nominal => positive = faster * freq_delta = freq_nominal * (1 + fractional) - freq_nominal * So: freq_delta = freq_nominal * fractional * * However, we are dealing with periods, so: * period_adjusted = period_nominal / (1 + fractional) * period_delta = period_nominal - period_adjusted => positive = faster * period_delta = period_nominal * fractional / (1 + fractional) * * Hence: * period_delta = period_nominal * abs_scaled_ppm / * (2^16 * 10^6 + abs_scaled_ppm) * * To avoid overflow, we reduce both sides of the divide operation by a factor * of 16. */ static u64 mvpp22_calc_frac_ppm(struct mvpp2_tai *tai, long abs_scaled_ppm) { u64 val = tai->period * abs_scaled_ppm >> 4; return div_u64(val, (1000000 << 12) + (abs_scaled_ppm >> 4)); } static s32 mvpp22_calc_max_adj(struct mvpp2_tai *tai) { return 1000000; } static int mvpp22_tai_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { struct mvpp2_tai *tai = ptp_to_tai(ptp); unsigned long flags; void __iomem *base; bool neg_adj; s32 frac; u64 val; neg_adj = scaled_ppm < 0; if (neg_adj) scaled_ppm = -scaled_ppm; val = mvpp22_calc_frac_ppm(tai, scaled_ppm); /* Convert to a signed 32-bit adjustment */ if (neg_adj) { /* -S32_MIN warns, -val < S32_MIN fails, so go for the easy * solution. */ if (val > 0x80000000) return -ERANGE; frac = -val; } else { if (val > S32_MAX) return -ERANGE; frac = val; } base = tai->base; spin_lock_irqsave(&tai->lock, flags); mvpp2_tai_write(frac >> 16, base + MVPP22_TAI_TLV_FRAC_HIGH); mvpp2_tai_write(frac, base + MVPP22_TAI_TLV_FRAC_LOW); mvpp2_tai_op(TCFCR0_TCF_FREQUPDATE, base); spin_unlock_irqrestore(&tai->lock, flags); return 0; } static int mvpp22_tai_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct mvpp2_tai *tai = ptp_to_tai(ptp); struct timespec64 ts; unsigned long flags; void __iomem *base; u32 tcf; /* We can't deal with S64_MIN */ if (delta == S64_MIN) return -ERANGE; if (delta < 0) { delta = -delta; tcf = TCFCR0_TCF_DECREMENT; } else { tcf = TCFCR0_TCF_INCREMENT; } ts = ns_to_timespec64(delta); base = tai->base; spin_lock_irqsave(&tai->lock, flags); mvpp2_tai_write_tlv(&ts, 0, base); mvpp2_tai_op(tcf, base); spin_unlock_irqrestore(&tai->lock, flags); return 0; } static int mvpp22_tai_gettimex64(struct ptp_clock_info *ptp, struct timespec64 *ts, struct ptp_system_timestamp *sts) { struct mvpp2_tai *tai = ptp_to_tai(ptp); unsigned long flags; void __iomem *base; u32 tcsr; int ret; base = tai->base; spin_lock_irqsave(&tai->lock, flags); /* XXX: the only way to read the PTP time is for the CPU to trigger * an event. However, there is no way to distinguish between the CPU * triggered event, and an external event on PTP_EVENT_REQ. So this * is incompatible with external use of PTP_EVENT_REQ. 
*/ ptp_read_system_prets(sts); mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, TCFCR0_TCF_MASK | TCFCR0_TCF_TRIGGER, TCFCR0_TCF_CAPTURE | TCFCR0_TCF_TRIGGER); ptp_read_system_postts(sts); mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, TCFCR0_TCF_MASK, TCFCR0_TCF_NOP); tcsr = readl(base + MVPP22_TAI_TCSR); if (tcsr & TCSR_CAPTURE_1_VALID) { mvpp22_tai_read_ts(ts, base + MVPP22_TAI_TCV1_SEC_HIGH); ret = 0; } else if (tcsr & TCSR_CAPTURE_0_VALID) { mvpp22_tai_read_ts(ts, base + MVPP22_TAI_TCV0_SEC_HIGH); ret = 0; } else { /* We don't seem to have a reading... */ ret = -EBUSY; } spin_unlock_irqrestore(&tai->lock, flags); return ret; } static int mvpp22_tai_settime64(struct ptp_clock_info *ptp, const struct timespec64 *ts) { struct mvpp2_tai *tai = ptp_to_tai(ptp); unsigned long flags; void __iomem *base; base = tai->base; spin_lock_irqsave(&tai->lock, flags); mvpp2_tai_write_tlv(ts, 0, base); /* Trigger an update to load the value from the TLV registers * into the TOD counter. Note that an external unmaskable event on * PTP_EVENT_REQ will also trigger this action. */ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, TCFCR0_PHASE_UPDATE_ENABLE | TCFCR0_TCF_MASK | TCFCR0_TCF_TRIGGER, TCFCR0_TCF_UPDATE | TCFCR0_TCF_TRIGGER); mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, TCFCR0_TCF_MASK, TCFCR0_TCF_NOP); spin_unlock_irqrestore(&tai->lock, flags); return 0; } static long mvpp22_tai_aux_work(struct ptp_clock_info *ptp) { struct mvpp2_tai *tai = ptp_to_tai(ptp); mvpp22_tai_gettimex64(ptp, &tai->stamp, NULL); return msecs_to_jiffies(2000); } static void mvpp22_tai_set_step(struct mvpp2_tai *tai) { void __iomem *base = tai->base; u32 nano, frac; nano = upper_32_bits(tai->period); frac = lower_32_bits(tai->period); /* As the fractional nanosecond is a signed offset, if the MSB (sign) * bit is set, we have to increment the whole nanoseconds. */ if (frac >= 0x80000000) nano += 1; mvpp2_tai_write(nano, base + MVPP22_TAI_TOD_STEP_NANO_CR); mvpp2_tai_write(frac >> 16, base + MVPP22_TAI_TOD_STEP_FRAC_HIGH); mvpp2_tai_write(frac, base + MVPP22_TAI_TOD_STEP_FRAC_LOW); } static void mvpp22_tai_init(struct mvpp2_tai *tai) { void __iomem *base = tai->base; mvpp22_tai_set_step(tai); /* Release the TAI reset */ mvpp2_tai_modify(base + MVPP22_TAI_CR0, CR0_SW_NRESET, CR0_SW_NRESET); } int mvpp22_tai_ptp_clock_index(struct mvpp2_tai *tai) { return ptp_clock_index(tai->ptp_clock); } void mvpp22_tai_tstamp(struct mvpp2_tai *tai, u32 tstamp, struct skb_shared_hwtstamps *hwtstamp) { struct timespec64 ts; int delta; /* The tstamp consists of 2 bits of seconds and 30 bits of nanoseconds. * We use our stored timestamp (tai->stamp) to form a full timestamp, * and we must read the seconds exactly once. */ ts.tv_sec = READ_ONCE(tai->stamp.tv_sec); ts.tv_nsec = tstamp & 0x3fffffff; /* Calculate the delta in seconds between our stored timestamp and * the value read from the queue. Allow timestamps one second in the * past, otherwise consider them to be in the future. 
*/ delta = ((tstamp >> 30) - (ts.tv_sec & 3)) & 3; if (delta == 3) delta -= 4; ts.tv_sec += delta; memset(hwtstamp, 0, sizeof(*hwtstamp)); hwtstamp->hwtstamp = timespec64_to_ktime(ts); } void mvpp22_tai_start(struct mvpp2_tai *tai) { long delay; delay = mvpp22_tai_aux_work(&tai->caps); ptp_schedule_worker(tai->ptp_clock, delay); } void mvpp22_tai_stop(struct mvpp2_tai *tai) { ptp_cancel_worker_sync(tai->ptp_clock); } static void mvpp22_tai_remove(void *priv) { struct mvpp2_tai *tai = priv; if (!IS_ERR(tai->ptp_clock)) ptp_clock_unregister(tai->ptp_clock); } int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv) { struct mvpp2_tai *tai; int ret; tai = devm_kzalloc(dev, sizeof(*tai), GFP_KERNEL); if (!tai) return -ENOMEM; spin_lock_init(&tai->lock); tai->base = priv->iface_base; /* The step size consists of three registers - a 16-bit nanosecond step * size, and a 32-bit fractional nanosecond step size split over two * registers. The fractional nanosecond step size has units of 2^-32ns. * * To calculate this, we calculate: * (10^9 + freq / 2) / (freq * 2^-32) * which gives us the nanosecond step to the nearest integer in 16.32 * fixed point format, and the fractional part of the step size with * the MSB inverted. With rounding of the fractional nanosecond, and * simplification, this becomes: * (10^9 << 32 + freq << 31 + (freq + 1) >> 1) / freq * * So: * div = (10^9 << 32 + freq << 31 + (freq + 1) >> 1) / freq * nano = upper_32_bits(div); * frac = lower_32_bits(div) ^ 0x80000000; * Will give the values for the registers. * * This is all seems perfect, but alas it is not when considering the * whole story. The system is clocked from 25MHz, which is multiplied * by a PLL to 1GHz, and then divided by three, giving 333333333Hz * (recurring). This gives exactly 3ns, but using 333333333Hz with * the above gives an error of 13*2^-32ns. * * Consequently, we use the period rather than calculating from the * frequency. */ tai->period = 3ULL << 32; mvpp22_tai_init(tai); tai->caps.owner = THIS_MODULE; strscpy(tai->caps.name, "Marvell PP2.2", sizeof(tai->caps.name)); tai->caps.max_adj = mvpp22_calc_max_adj(tai); tai->caps.adjfine = mvpp22_tai_adjfine; tai->caps.adjtime = mvpp22_tai_adjtime; tai->caps.gettimex64 = mvpp22_tai_gettimex64; tai->caps.settime64 = mvpp22_tai_settime64; tai->caps.do_aux_work = mvpp22_tai_aux_work; ret = devm_add_action(dev, mvpp22_tai_remove, tai); if (ret) return ret; tai->ptp_clock = ptp_clock_register(&tai->caps, dev); if (IS_ERR(tai->ptp_clock)) return PTR_ERR(tai->ptp_clock); priv->tai = tai; return 0; }
linux-master
drivers/net/ethernet/marvell/mvpp2/mvpp2_tai.c
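The long comment in mvpp22_tai_adjfine()/mvpp22_calc_frac_ppm() above derives the fractional correction as period_delta = period_nominal * abs_scaled_ppm / (2^16 * 10^6 + abs_scaled_ppm), with both sides of the divide reduced by a factor of 16 to avoid overflow. The user-space sketch below replays that arithmetic for the 3 ns nominal period (3ULL << 32 in 32.32 fixed point) so the magnitude of the adjustment can be sanity-checked; calc_frac_ppm() is an illustrative re-implementation, not the driver symbol, and the 1 ppm input is an arbitrary example.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative re-implementation of the period-delta formula described in
 * the driver comment above:
 *
 *   period_delta = period_nominal * abs_scaled_ppm /
 *                  (2^16 * 10^6 + abs_scaled_ppm)
 *
 * with both sides of the divide pre-reduced by a factor of 16, mirroring
 * the driver's overflow-avoidance step (10^6 << 12 == (2^16 * 10^6) >> 4).
 */
static uint64_t calc_frac_ppm(uint64_t period_3232, long abs_scaled_ppm)
{
	uint64_t val = (period_3232 * (uint64_t)abs_scaled_ppm) >> 4;

	return val / ((1000000ULL << 12) + ((uint64_t)abs_scaled_ppm >> 4));
}

int main(void)
{
	/* Nominal 3 ns period in 32.32 fixed point, as used for PP2.2. */
	uint64_t period = 3ULL << 32;
	/* 1 ppm expressed in scaled_ppm units (1 ppm == 2^16). */
	long scaled_ppm = 65536;

	printf("frac adjustment: %" PRIu64 " (units of 2^-32 ns)\n",
	       calc_frac_ppm(period, scaled_ppm));
	return 0;
}

For a 1 ppm request this prints 12884, i.e. roughly 3 * 10^-6 ns expressed in 2^-32 ns units, corresponding to the value the driver writes to the TLV FRAC registers (negated for a negative adjustment) before triggering a TCFCR0_TCF_FREQUPDATE operation.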
// SPDX-License-Identifier: GPL-2.0 /* * RSS and Classifier helpers for Marvell PPv2 Network Controller * * Copyright (C) 2014 Marvell * * Marcin Wojtas <[email protected]> */ #include "mvpp2.h" #include "mvpp2_cls.h" #include "mvpp2_prs.h" #define MVPP2_DEF_FLOW(_type, _id, _opts, _ri, _ri_mask) \ { \ .flow_type = _type, \ .flow_id = _id, \ .supported_hash_opts = _opts, \ .prs_ri = { \ .ri = _ri, \ .ri_mask = _ri_mask \ } \ } static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = { /* TCP over IPv4 flows, Not fragmented, no vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG, MVPP22_CLS_HEK_IP4_5T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG, MVPP22_CLS_HEK_IP4_5T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG, MVPP22_CLS_HEK_IP4_5T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), /* TCP over IPv4 flows, Not fragmented, with vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG, MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG, MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG, MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK), /* TCP over IPv4 flows, fragmented, no vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG, MVPP22_CLS_HEK_IP4_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG, MVPP22_CLS_HEK_IP4_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG, MVPP22_CLS_HEK_IP4_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), /* TCP over IPv4 flows, fragmented, with vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG, MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG, MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG, MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK), /* UDP over IPv4 flows, Not fragmented, no vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG, MVPP22_CLS_HEK_IP4_5T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG, MVPP22_CLS_HEK_IP4_5T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK | 
MVPP2_PRS_RI_VLAN_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG, MVPP22_CLS_HEK_IP4_5T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), /* UDP over IPv4 flows, Not fragmented, with vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG, MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG, MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG, MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK), /* UDP over IPv4 flows, fragmented, no vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG, MVPP22_CLS_HEK_IP4_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG, MVPP22_CLS_HEK_IP4_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG, MVPP22_CLS_HEK_IP4_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), /* UDP over IPv4 flows, fragmented, with vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG, MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG, MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG, MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK), /* TCP over IPv6 flows, not fragmented, no vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG, MVPP22_CLS_HEK_IP6_5T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG, MVPP22_CLS_HEK_IP6_5T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), /* TCP over IPv6 flows, not fragmented, with vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG, MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG, MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK), /* TCP over IPv6 flows, fragmented, no vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG, MVPP22_CLS_HEK_IP6_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG, MVPP22_CLS_HEK_IP6_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), /* 
TCP over IPv6 flows, fragmented, with vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG, MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG, MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK), /* UDP over IPv6 flows, not fragmented, no vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG, MVPP22_CLS_HEK_IP6_5T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG, MVPP22_CLS_HEK_IP6_5T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), /* UDP over IPv6 flows, not fragmented, with vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG, MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG, MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK), /* UDP over IPv6 flows, fragmented, no vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG, MVPP22_CLS_HEK_IP6_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG, MVPP22_CLS_HEK_IP6_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), /* UDP over IPv6 flows, fragmented, with vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG, MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG, MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK), /* IPv4 flows, no vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG, MVPP22_CLS_HEK_IP4_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG, MVPP22_CLS_HEK_IP4_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT, MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG, MVPP22_CLS_HEK_IP4_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER, MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), /* IPv4 flows, with vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG, MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG, MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP4_OPT, MVPP2_PRS_RI_L3_PROTO_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG, MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP4_OTHER, MVPP2_PRS_RI_L3_PROTO_MASK), /* IPv6 flows, no vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG, MVPP22_CLS_HEK_IP6_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6, MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, 
MVPP2_FL_IP6_UNTAG, MVPP22_CLS_HEK_IP6_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6, MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), /* IPv6 flows, with vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG, MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP6, MVPP2_PRS_RI_L3_PROTO_MASK), MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG, MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED, MVPP2_PRS_RI_L3_IP6, MVPP2_PRS_RI_L3_PROTO_MASK), /* Non IP flow, no vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_UNTAG, 0, MVPP2_PRS_RI_VLAN_NONE, MVPP2_PRS_RI_VLAN_MASK), /* Non IP flow, with vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG, MVPP22_CLS_HEK_OPT_VLAN, 0, 0), }; u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index) { mvpp2_write(priv, MVPP2_CTRS_IDX, index); return mvpp2_read(priv, MVPP2_CLS_FLOW_TBL_HIT_CTR); } void mvpp2_cls_flow_read(struct mvpp2 *priv, int index, struct mvpp2_cls_flow_entry *fe) { fe->index = index; mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, index); fe->data[0] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL0_REG); fe->data[1] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL1_REG); fe->data[2] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL2_REG); } /* Update classification flow table registers */ static void mvpp2_cls_flow_write(struct mvpp2 *priv, struct mvpp2_cls_flow_entry *fe) { mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index); mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]); mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]); mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); } u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index) { mvpp2_write(priv, MVPP2_CTRS_IDX, index); return mvpp2_read(priv, MVPP2_CLS_DEC_TBL_HIT_CTR); } void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way, struct mvpp2_cls_lookup_entry *le) { u32 val; val = (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | lkpid; mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val); le->way = way; le->lkpid = lkpid; le->data = mvpp2_read(priv, MVPP2_CLS_LKP_TBL_REG); } /* Update classification lookup table register */ static void mvpp2_cls_lookup_write(struct mvpp2 *priv, struct mvpp2_cls_lookup_entry *le) { u32 val; val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid; mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val); mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data); } /* Operations on flow entry */ static int mvpp2_cls_flow_hek_num_get(struct mvpp2_cls_flow_entry *fe) { return fe->data[1] & MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK; } static void mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry *fe, int num_of_fields) { fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK; fe->data[1] |= MVPP2_CLS_FLOW_TBL1_N_FIELDS(num_of_fields); } static int mvpp2_cls_flow_hek_get(struct mvpp2_cls_flow_entry *fe, int field_index) { return (fe->data[2] >> MVPP2_CLS_FLOW_TBL2_FLD_OFFS(field_index)) & MVPP2_CLS_FLOW_TBL2_FLD_MASK; } static void mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry *fe, int field_index, int field_id) { fe->data[2] &= ~MVPP2_CLS_FLOW_TBL2_FLD(field_index, MVPP2_CLS_FLOW_TBL2_FLD_MASK); fe->data[2] |= MVPP2_CLS_FLOW_TBL2_FLD(field_index, field_id); } static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe, int engine) { fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_ENG(MVPP2_CLS_FLOW_TBL0_ENG_MASK); fe->data[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine); } int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe) { return (fe->data[0] >> MVPP2_CLS_FLOW_TBL0_OFFS) & MVPP2_CLS_FLOW_TBL0_ENG_MASK; } static void 
mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe, bool from_packet) { if (from_packet) fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL; else fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL; } static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe, bool is_last) { fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_LAST; fe->data[0] |= !!is_last; } static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio) { fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK); fe->data[1] |= MVPP2_CLS_FLOW_TBL1_PRIO(prio); } static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe, u32 port) { fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port); } static void mvpp2_cls_flow_port_remove(struct mvpp2_cls_flow_entry *fe, u32 port) { fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID(port); } static void mvpp2_cls_flow_lu_type_set(struct mvpp2_cls_flow_entry *fe, u8 lu_type) { fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK); fe->data[1] |= MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu_type); } /* Initialize the parser entry for the given flow */ static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv, const struct mvpp2_cls_flow *flow) { mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri, flow->prs_ri.ri_mask); } /* Initialize the Lookup Id table entry for the given flow */ static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv, const struct mvpp2_cls_flow *flow) { struct mvpp2_cls_lookup_entry le; le.way = 0; le.lkpid = flow->flow_id; /* The default RxQ for this port is set in the C2 lookup */ le.data = 0; /* We point on the first lookup in the sequence for the flow, that is * the C2 lookup. */ le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_CLS_FLT_FIRST(flow->flow_id)); /* CLS is always enabled, RSS is enabled/disabled in C2 lookup */ le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; mvpp2_cls_lookup_write(priv, &le); } static void mvpp2_cls_c2_write(struct mvpp2 *priv, struct mvpp2_cls_c2_entry *c2) { u32 val; mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index); val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV); if (c2->valid) val &= ~MVPP22_CLS_C2_TCAM_INV_BIT; else val |= MVPP22_CLS_C2_TCAM_INV_BIT; mvpp2_write(priv, MVPP22_CLS_C2_TCAM_INV, val); mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act); mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]); mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]); mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]); mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]); mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]); mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]); mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]); mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]); /* Writing TCAM_DATA4 flushes writes to TCAM_DATA0-4 and INV to HW */ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]); } void mvpp2_cls_c2_read(struct mvpp2 *priv, int index, struct mvpp2_cls_c2_entry *c2) { u32 val; mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index); c2->index = index; c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0); c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1); c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2); c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3); c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4); c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT); c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0); c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1); c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2); c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3); val = 
mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV); c2->valid = !(val & MVPP22_CLS_C2_TCAM_INV_BIT); } static int mvpp2_cls_ethtool_flow_to_type(int flow_type) { switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) { case ETHER_FLOW: return MVPP22_FLOW_ETHERNET; case TCP_V4_FLOW: return MVPP22_FLOW_TCP4; case TCP_V6_FLOW: return MVPP22_FLOW_TCP6; case UDP_V4_FLOW: return MVPP22_FLOW_UDP4; case UDP_V6_FLOW: return MVPP22_FLOW_UDP6; case IPV4_FLOW: return MVPP22_FLOW_IP4; case IPV6_FLOW: return MVPP22_FLOW_IP6; default: return -EOPNOTSUPP; } } static int mvpp2_cls_c2_port_flow_index(struct mvpp2_port *port, int loc) { return MVPP22_CLS_C2_RFS_LOC(port->id, loc); } /* Initialize the flow table entries for the given flow */ static void mvpp2_cls_flow_init(struct mvpp2 *priv, const struct mvpp2_cls_flow *flow) { struct mvpp2_cls_flow_entry fe; int i, pri = 0; /* Assign default values to all entries in the flow */ for (i = MVPP2_CLS_FLT_FIRST(flow->flow_id); i <= MVPP2_CLS_FLT_LAST(flow->flow_id); i++) { memset(&fe, 0, sizeof(fe)); fe.index = i; mvpp2_cls_flow_pri_set(&fe, pri++); if (i == MVPP2_CLS_FLT_LAST(flow->flow_id)) mvpp2_cls_flow_last_set(&fe, 1); mvpp2_cls_flow_write(priv, &fe); } /* RSS config C2 lookup */ mvpp2_cls_flow_read(priv, MVPP2_CLS_FLT_C2_RSS_ENTRY(flow->flow_id), &fe); mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2); mvpp2_cls_flow_port_id_sel(&fe, true); mvpp2_cls_flow_lu_type_set(&fe, MVPP22_CLS_LU_TYPE_ALL); /* Add all ports */ for (i = 0; i < MVPP2_MAX_PORTS; i++) mvpp2_cls_flow_port_add(&fe, BIT(i)); mvpp2_cls_flow_write(priv, &fe); /* C3Hx lookups */ for (i = 0; i < MVPP2_MAX_PORTS; i++) { mvpp2_cls_flow_read(priv, MVPP2_CLS_FLT_HASH_ENTRY(i, flow->flow_id), &fe); /* Set a default engine. Will be overwritten when setting the * real HEK parameters */ mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C3HA); mvpp2_cls_flow_port_id_sel(&fe, true); mvpp2_cls_flow_port_add(&fe, BIT(i)); mvpp2_cls_flow_write(priv, &fe); } } /* Adds a field to the Header Extracted Key generation parameters*/ static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe, u32 field_id) { int nb_fields = mvpp2_cls_flow_hek_num_get(fe); if (nb_fields == MVPP2_FLOW_N_FIELDS) return -EINVAL; mvpp2_cls_flow_hek_set(fe, nb_fields, field_id); mvpp2_cls_flow_hek_num_set(fe, nb_fields + 1); return 0; } static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe, unsigned long hash_opts) { u32 field_id; int i; /* Clear old fields */ mvpp2_cls_flow_hek_num_set(fe, 0); fe->data[2] = 0; for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) { switch (BIT(i)) { case MVPP22_CLS_HEK_OPT_MAC_DA: field_id = MVPP22_CLS_FIELD_MAC_DA; break; case MVPP22_CLS_HEK_OPT_VLAN: field_id = MVPP22_CLS_FIELD_VLAN; break; case MVPP22_CLS_HEK_OPT_VLAN_PRI: field_id = MVPP22_CLS_FIELD_VLAN_PRI; break; case MVPP22_CLS_HEK_OPT_IP4SA: field_id = MVPP22_CLS_FIELD_IP4SA; break; case MVPP22_CLS_HEK_OPT_IP4DA: field_id = MVPP22_CLS_FIELD_IP4DA; break; case MVPP22_CLS_HEK_OPT_IP6SA: field_id = MVPP22_CLS_FIELD_IP6SA; break; case MVPP22_CLS_HEK_OPT_IP6DA: field_id = MVPP22_CLS_FIELD_IP6DA; break; case MVPP22_CLS_HEK_OPT_L4SIP: field_id = MVPP22_CLS_FIELD_L4SIP; break; case MVPP22_CLS_HEK_OPT_L4DIP: field_id = MVPP22_CLS_FIELD_L4DIP; break; default: return -EINVAL; } if (mvpp2_flow_add_hek_field(fe, field_id)) return -EINVAL; } return 0; } /* Returns the size, in bits, of the corresponding HEK field */ static int mvpp2_cls_hek_field_size(u32 field) { switch (field) { case MVPP22_CLS_HEK_OPT_MAC_DA: return 48; case 
MVPP22_CLS_HEK_OPT_VLAN: return 12; case MVPP22_CLS_HEK_OPT_VLAN_PRI: return 3; case MVPP22_CLS_HEK_OPT_IP4SA: case MVPP22_CLS_HEK_OPT_IP4DA: return 32; case MVPP22_CLS_HEK_OPT_IP6SA: case MVPP22_CLS_HEK_OPT_IP6DA: return 128; case MVPP22_CLS_HEK_OPT_L4SIP: case MVPP22_CLS_HEK_OPT_L4DIP: return 16; default: return -1; } } const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow) { if (flow >= MVPP2_N_PRS_FLOWS) return NULL; return &cls_flows[flow]; } /* Set the hash generation options for the given traffic flow. * One traffic flow (in the ethtool sense) has multiple classification flows, * to handle specific cases such as fragmentation, or the presence of a * VLAN / DSA Tag. * * Each of these individual flows has different constraints, for example we * can't hash fragmented packets on L4 data (else we would risk having packet * re-ordering), so each classification flows masks the options with their * supported ones. * */ static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type, u16 requested_opts) { const struct mvpp2_cls_flow *flow; struct mvpp2_cls_flow_entry fe; int i, engine, flow_index; u16 hash_opts; for_each_cls_flow_id_with_type(i, flow_type) { flow = mvpp2_cls_flow_get(i); if (!flow) return -EINVAL; flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id); mvpp2_cls_flow_read(port->priv, flow_index, &fe); hash_opts = flow->supported_hash_opts & requested_opts; /* Use C3HB engine to access L4 infos. This adds L4 infos to the * hash parameters */ if (hash_opts & MVPP22_CLS_HEK_L4_OPTS) engine = MVPP22_CLS_ENGINE_C3HB; else engine = MVPP22_CLS_ENGINE_C3HA; if (mvpp2_flow_set_hek_fields(&fe, hash_opts)) return -EINVAL; mvpp2_cls_flow_eng_set(&fe, engine); mvpp2_cls_flow_write(port->priv, &fe); } return 0; } u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe) { u16 hash_opts = 0; int n_fields, i, field; n_fields = mvpp2_cls_flow_hek_num_get(fe); for (i = 0; i < n_fields; i++) { field = mvpp2_cls_flow_hek_get(fe, i); switch (field) { case MVPP22_CLS_FIELD_MAC_DA: hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA; break; case MVPP22_CLS_FIELD_VLAN: hash_opts |= MVPP22_CLS_HEK_OPT_VLAN; break; case MVPP22_CLS_FIELD_VLAN_PRI: hash_opts |= MVPP22_CLS_HEK_OPT_VLAN_PRI; break; case MVPP22_CLS_FIELD_L3_PROTO: hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO; break; case MVPP22_CLS_FIELD_IP4SA: hash_opts |= MVPP22_CLS_HEK_OPT_IP4SA; break; case MVPP22_CLS_FIELD_IP4DA: hash_opts |= MVPP22_CLS_HEK_OPT_IP4DA; break; case MVPP22_CLS_FIELD_IP6SA: hash_opts |= MVPP22_CLS_HEK_OPT_IP6SA; break; case MVPP22_CLS_FIELD_IP6DA: hash_opts |= MVPP22_CLS_HEK_OPT_IP6DA; break; case MVPP22_CLS_FIELD_L4SIP: hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP; break; case MVPP22_CLS_FIELD_L4DIP: hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP; break; default: break; } } return hash_opts; } /* Returns the hash opts for this flow. There are several classifier flows * for one traffic flow, this returns an aggregation of all configurations. 
*/ static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type) { const struct mvpp2_cls_flow *flow; struct mvpp2_cls_flow_entry fe; int i, flow_index; u16 hash_opts = 0; for_each_cls_flow_id_with_type(i, flow_type) { flow = mvpp2_cls_flow_get(i); if (!flow) return 0; flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id); mvpp2_cls_flow_read(port->priv, flow_index, &fe); hash_opts |= mvpp2_flow_get_hek_fields(&fe); } return hash_opts; } static void mvpp2_cls_port_init_flows(struct mvpp2 *priv) { const struct mvpp2_cls_flow *flow; int i; for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) { flow = mvpp2_cls_flow_get(i); if (!flow) break; mvpp2_cls_flow_prs_init(priv, flow); mvpp2_cls_flow_lkp_init(priv, flow); mvpp2_cls_flow_init(priv, flow); } } static void mvpp2_port_c2_cls_init(struct mvpp2_port *port) { struct mvpp2_cls_c2_entry c2; u8 qh, ql, pmap; memset(&c2, 0, sizeof(c2)); c2.index = MVPP22_CLS_C2_RSS_ENTRY(port->id); pmap = BIT(port->id); c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap); c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap)); /* Match on Lookup Type */ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK)); c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(MVPP22_CLS_LU_TYPE_ALL); /* Update RSS status after matching this entry */ c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK); /* Mark packet as "forwarded to software", needed for RSS */ c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK); /* Configure the default rx queue : Update Queue Low and Queue High, but * don't lock, since the rx queue selection might be overridden by RSS */ c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD) | MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD); qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK; ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK; c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) | MVPP22_CLS_C2_ATTR0_QLOW(ql); c2.valid = true; mvpp2_cls_c2_write(port->priv, &c2); } /* Classifier default initialization */ void mvpp2_cls_init(struct mvpp2 *priv) { struct mvpp2_cls_lookup_entry le; struct mvpp2_cls_flow_entry fe; struct mvpp2_cls_c2_entry c2; int index; /* Enable classifier */ mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); /* Clear classifier flow table */ memset(&fe.data, 0, sizeof(fe.data)); for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { fe.index = index; mvpp2_cls_flow_write(priv, &fe); } /* Clear classifier lookup table */ le.data = 0; for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) { le.lkpid = index; le.way = 0; mvpp2_cls_lookup_write(priv, &le); le.way = 1; mvpp2_cls_lookup_write(priv, &le); } /* Clear C2 TCAM engine table */ memset(&c2, 0, sizeof(c2)); c2.valid = false; for (index = 0; index < MVPP22_CLS_C2_N_ENTRIES; index++) { c2.index = index; mvpp2_cls_c2_write(priv, &c2); } /* Disable the FIFO stages in C2 engine, which are only used in BIST * mode */ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_CTRL, MVPP22_CLS_C2_TCAM_BYPASS_FIFO); mvpp2_cls_port_init_flows(priv); } void mvpp2_cls_port_config(struct mvpp2_port *port) { struct mvpp2_cls_lookup_entry le; u32 val; /* Set way for the port */ val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG); val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id); mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val); /* Pick the entry to be accessed in lookup ID decoding table * according to the way and lkpid. 
*/ le.lkpid = port->id; le.way = 0; le.data = 0; /* Set initial CPU queue for receiving packets */ le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK; le.data |= port->first_rxq; /* Disable classification engines */ le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; /* Update lookup ID table entry */ mvpp2_cls_lookup_write(port->priv, &le); mvpp2_port_c2_cls_init(port); } u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index) { mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2_index); return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR); } static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port, u32 ctx) { struct mvpp2_cls_c2_entry c2; u8 qh, ql; mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2); /* The RxQ number is used to select the RSS table. It that case, we set * it to be the ctx number. */ qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK; ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK; c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) | MVPP22_CLS_C2_ATTR0_QLOW(ql); c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN; mvpp2_cls_c2_write(port->priv, &c2); } static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port) { struct mvpp2_cls_c2_entry c2; u8 qh, ql; mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2); /* Reset the default destination RxQ to the port's first rx queue. */ qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK; ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK; c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) | MVPP22_CLS_C2_ATTR0_QLOW(ql); c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN; mvpp2_cls_c2_write(port->priv, &c2); } static inline int mvpp22_rss_ctx(struct mvpp2_port *port, int port_rss_ctx) { return port->rss_ctx[port_rss_ctx]; } int mvpp22_port_rss_enable(struct mvpp2_port *port) { if (mvpp22_rss_ctx(port, 0) < 0) return -EINVAL; mvpp2_rss_port_c2_enable(port, mvpp22_rss_ctx(port, 0)); return 0; } int mvpp22_port_rss_disable(struct mvpp2_port *port) { if (mvpp22_rss_ctx(port, 0) < 0) return -EINVAL; mvpp2_rss_port_c2_disable(port); return 0; } static void mvpp22_port_c2_lookup_disable(struct mvpp2_port *port, int entry) { struct mvpp2_cls_c2_entry c2; mvpp2_cls_c2_read(port->priv, entry, &c2); /* Clear the port map so that the entry doesn't match anymore */ c2.tcam[4] &= ~(MVPP22_CLS_C2_PORT_ID(BIT(port->id))); mvpp2_cls_c2_write(port->priv, &c2); } /* Set CPU queue number for oversize packets */ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) { u32 val; mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id), port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK); mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id), (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS)); val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG); val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); } static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port, struct mvpp2_rfs_rule *rule) { struct flow_action_entry *act; struct mvpp2_cls_c2_entry c2; u8 qh, ql, pmap; int index, ctx; if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL)) return -EOPNOTSUPP; memset(&c2, 0, sizeof(c2)); index = mvpp2_cls_c2_port_flow_index(port, rule->loc); if (index < 0) return -EINVAL; c2.index = index; act = &rule->flow->action.entries[0]; rule->c2_index = c2.index; c2.tcam[3] = (rule->c2_tcam & 0xffff) | ((rule->c2_tcam_mask & 0xffff) << 16); c2.tcam[2] = ((rule->c2_tcam >> 16) & 0xffff) | (((rule->c2_tcam_mask >> 16) & 0xffff) << 16); c2.tcam[1] = ((rule->c2_tcam >> 32) & 0xffff) | (((rule->c2_tcam_mask 
>> 32) & 0xffff) << 16); c2.tcam[0] = ((rule->c2_tcam >> 48) & 0xffff) | (((rule->c2_tcam_mask >> 48) & 0xffff) << 16); pmap = BIT(port->id); c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap); c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap)); /* Match on Lookup Type */ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK)); c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(rule->loc); if (act->id == FLOW_ACTION_DROP) { c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_RED_LOCK); } else { /* We want to keep the default color derived from the Header * Parser drop entries, for VLAN and MAC filtering. This will * assign a default color of Green or Red, and we want matches * with a non-drop action to keep that color. */ c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_NO_UPD_LOCK); /* Update RSS status after matching this entry */ if (act->queue.ctx) c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN; /* Always lock the RSS_EN decision. We might have high prio * rules steering to an RXQ, and a lower one steering to RSS, * we don't want the low prio RSS rule overwriting this flag. */ c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK); /* Mark packet as "forwarded to software", needed for RSS */ c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK); c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD_LOCK) | MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD_LOCK); if (act->queue.ctx) { /* Get the global ctx number */ ctx = mvpp22_rss_ctx(port, act->queue.ctx); if (ctx < 0) return -EINVAL; qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK; ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK; } else { qh = ((act->queue.index + port->first_rxq) >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK; ql = (act->queue.index + port->first_rxq) & MVPP22_CLS_C2_ATTR0_QLOW_MASK; } c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) | MVPP22_CLS_C2_ATTR0_QLOW(ql); } c2.valid = true; mvpp2_cls_c2_write(port->priv, &c2); return 0; } static int mvpp2_port_c2_rfs_rule_insert(struct mvpp2_port *port, struct mvpp2_rfs_rule *rule) { return mvpp2_port_c2_tcam_rule_add(port, rule); } static int mvpp2_port_cls_rfs_rule_remove(struct mvpp2_port *port, struct mvpp2_rfs_rule *rule) { const struct mvpp2_cls_flow *flow; struct mvpp2_cls_flow_entry fe; int index, i; for_each_cls_flow_id_containing_type(i, rule->flow_type) { flow = mvpp2_cls_flow_get(i); if (!flow) return 0; index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc); mvpp2_cls_flow_read(port->priv, index, &fe); mvpp2_cls_flow_port_remove(&fe, BIT(port->id)); mvpp2_cls_flow_write(port->priv, &fe); } if (rule->c2_index >= 0) mvpp22_port_c2_lookup_disable(port, rule->c2_index); return 0; } static int mvpp2_port_flt_rfs_rule_insert(struct mvpp2_port *port, struct mvpp2_rfs_rule *rule) { const struct mvpp2_cls_flow *flow; struct mvpp2 *priv = port->priv; struct mvpp2_cls_flow_entry fe; int index, ret, i; if (rule->engine != MVPP22_CLS_ENGINE_C2) return -EOPNOTSUPP; ret = mvpp2_port_c2_rfs_rule_insert(port, rule); if (ret) return ret; for_each_cls_flow_id_containing_type(i, rule->flow_type) { flow = mvpp2_cls_flow_get(i); if (!flow) return 0; if ((rule->hek_fields & flow->supported_hash_opts) != rule->hek_fields) continue; index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc); mvpp2_cls_flow_read(priv, index, &fe); mvpp2_cls_flow_eng_set(&fe, rule->engine); mvpp2_cls_flow_port_id_sel(&fe, true); mvpp2_flow_set_hek_fields(&fe, rule->hek_fields); mvpp2_cls_flow_lu_type_set(&fe, rule->loc); mvpp2_cls_flow_port_add(&fe, 0xf); mvpp2_cls_flow_write(priv, &fe); } return 0; } static int 
mvpp2_cls_c2_build_match(struct mvpp2_rfs_rule *rule) { struct flow_rule *flow = rule->flow; int offs = 0; /* The order of insertion in C2 tcam must match the order in which * the fields are found in the header */ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) { struct flow_match_vlan match; flow_rule_match_vlan(flow, &match); if (match.mask->vlan_id) { rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN; rule->c2_tcam |= ((u64)match.key->vlan_id) << offs; rule->c2_tcam_mask |= ((u64)match.mask->vlan_id) << offs; /* Don't update the offset yet */ } if (match.mask->vlan_priority) { rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN_PRI; /* VLAN pri is always at offset 13 relative to the * current offset */ rule->c2_tcam |= ((u64)match.key->vlan_priority) << (offs + 13); rule->c2_tcam_mask |= ((u64)match.mask->vlan_priority) << (offs + 13); } if (match.mask->vlan_dei) return -EOPNOTSUPP; /* vlan id and prio always seem to take a full 16-bit slot in * the Header Extracted Key. */ offs += 16; } if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) { struct flow_match_ports match; flow_rule_match_ports(flow, &match); if (match.mask->src) { rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4SIP; rule->c2_tcam |= ((u64)ntohs(match.key->src)) << offs; rule->c2_tcam_mask |= ((u64)ntohs(match.mask->src)) << offs; offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4SIP); } if (match.mask->dst) { rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4DIP; rule->c2_tcam |= ((u64)ntohs(match.key->dst)) << offs; rule->c2_tcam_mask |= ((u64)ntohs(match.mask->dst)) << offs; offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4DIP); } } if (hweight16(rule->hek_fields) > MVPP2_FLOW_N_FIELDS) return -EOPNOTSUPP; return 0; } static int mvpp2_cls_rfs_parse_rule(struct mvpp2_rfs_rule *rule) { struct flow_rule *flow = rule->flow; struct flow_action_entry *act; if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL)) return -EOPNOTSUPP; act = &flow->action.entries[0]; if (act->id != FLOW_ACTION_QUEUE && act->id != FLOW_ACTION_DROP) return -EOPNOTSUPP; /* When both an RSS context and an queue index are set, the index * is considered as an offset to be added to the indirection table * entries. We don't support this, so reject this rule. */ if (act->queue.ctx && act->queue.index) return -EOPNOTSUPP; /* For now, only use the C2 engine which has a HEK size limited to 64 * bits for TCAM matching. 
*/ rule->engine = MVPP22_CLS_ENGINE_C2; if (mvpp2_cls_c2_build_match(rule)) return -EINVAL; return 0; } int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port, struct ethtool_rxnfc *rxnfc) { struct mvpp2_ethtool_fs *efs; if (rxnfc->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW) return -EINVAL; efs = port->rfs_rules[rxnfc->fs.location]; if (!efs) return -ENOENT; memcpy(rxnfc, &efs->rxnfc, sizeof(efs->rxnfc)); return 0; } int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port, struct ethtool_rxnfc *info) { struct ethtool_rx_flow_spec_input input = {}; struct ethtool_rx_flow_rule *ethtool_rule; struct mvpp2_ethtool_fs *efs, *old_efs; int ret = 0; if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW) return -EINVAL; efs = kzalloc(sizeof(*efs), GFP_KERNEL); if (!efs) return -ENOMEM; input.fs = &info->fs; /* We need to manually set the rss_ctx, since this info isn't present * in info->fs */ if (info->fs.flow_type & FLOW_RSS) input.rss_ctx = info->rss_context; ethtool_rule = ethtool_rx_flow_rule_create(&input); if (IS_ERR(ethtool_rule)) { ret = PTR_ERR(ethtool_rule); goto clean_rule; } efs->rule.flow = ethtool_rule->rule; efs->rule.flow_type = mvpp2_cls_ethtool_flow_to_type(info->fs.flow_type); if (efs->rule.flow_type < 0) { ret = efs->rule.flow_type; goto clean_rule; } ret = mvpp2_cls_rfs_parse_rule(&efs->rule); if (ret) goto clean_eth_rule; efs->rule.loc = info->fs.location; /* Replace an already existing rule */ if (port->rfs_rules[efs->rule.loc]) { old_efs = port->rfs_rules[efs->rule.loc]; ret = mvpp2_port_cls_rfs_rule_remove(port, &old_efs->rule); if (ret) goto clean_eth_rule; kfree(old_efs); port->n_rfs_rules--; } ret = mvpp2_port_flt_rfs_rule_insert(port, &efs->rule); if (ret) goto clean_eth_rule; ethtool_rx_flow_rule_destroy(ethtool_rule); efs->rule.flow = NULL; memcpy(&efs->rxnfc, info, sizeof(*info)); port->rfs_rules[efs->rule.loc] = efs; port->n_rfs_rules++; return ret; clean_eth_rule: ethtool_rx_flow_rule_destroy(ethtool_rule); clean_rule: kfree(efs); return ret; } int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port, struct ethtool_rxnfc *info) { struct mvpp2_ethtool_fs *efs; int ret; if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW) return -EINVAL; efs = port->rfs_rules[info->fs.location]; if (!efs) return -EINVAL; /* Remove the rule from the engines. */ ret = mvpp2_port_cls_rfs_rule_remove(port, &efs->rule); if (ret) return ret; port->n_rfs_rules--; port->rfs_rules[info->fs.location] = NULL; kfree(efs); return 0; } static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq) { int nrxqs, cpu, cpus = num_possible_cpus(); /* Number of RXQs per CPU */ nrxqs = port->nrxqs / cpus; /* CPU that will handle this rx queue */ cpu = rxq / nrxqs; if (!cpu_online(cpu)) return port->first_rxq; /* Indirection to better distribute the paquets on the CPUs when * configuring the RSS queues. 
*/ return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs); } static void mvpp22_rss_fill_table(struct mvpp2_port *port, struct mvpp2_rss_table *table, u32 rss_ctx) { struct mvpp2 *priv = port->priv; int i; for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) { u32 sel = MVPP22_RSS_INDEX_TABLE(rss_ctx) | MVPP22_RSS_INDEX_TABLE_ENTRY(i); mvpp2_write(priv, MVPP22_RSS_INDEX, sel); mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, mvpp22_rxfh_indir(port, table->indir[i])); } } static int mvpp22_rss_context_create(struct mvpp2_port *port, u32 *rss_ctx) { struct mvpp2 *priv = port->priv; u32 ctx; /* Find the first free RSS table */ for (ctx = 0; ctx < MVPP22_N_RSS_TABLES; ctx++) { if (!priv->rss_tables[ctx]) break; } if (ctx == MVPP22_N_RSS_TABLES) return -EINVAL; priv->rss_tables[ctx] = kzalloc(sizeof(*priv->rss_tables[ctx]), GFP_KERNEL); if (!priv->rss_tables[ctx]) return -ENOMEM; *rss_ctx = ctx; /* Set the table width: replace the whole classifier Rx queue number * with the ones configured in RSS table entries. */ mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(ctx)); mvpp2_write(priv, MVPP22_RSS_WIDTH, 8); mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(ctx)); mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE, MVPP22_RSS_TABLE_POINTER(ctx)); return 0; } int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *port_ctx) { u32 rss_ctx; int ret, i; ret = mvpp22_rss_context_create(port, &rss_ctx); if (ret) return ret; /* Find the first available context number in the port, starting from 1. * Context 0 on each port is reserved for the default context. */ for (i = 1; i < MVPP22_N_RSS_TABLES; i++) { if (port->rss_ctx[i] < 0) break; } if (i == MVPP22_N_RSS_TABLES) return -EINVAL; port->rss_ctx[i] = rss_ctx; *port_ctx = i; return 0; } static struct mvpp2_rss_table *mvpp22_rss_table_get(struct mvpp2 *priv, int rss_ctx) { if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES) return NULL; return priv->rss_tables[rss_ctx]; } int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 port_ctx) { struct mvpp2 *priv = port->priv; struct ethtool_rxnfc *rxnfc; int i, rss_ctx, ret; rss_ctx = mvpp22_rss_ctx(port, port_ctx); if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES) return -EINVAL; /* Invalidate any active classification rule that use this context */ for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) { if (!port->rfs_rules[i]) continue; rxnfc = &port->rfs_rules[i]->rxnfc; if (!(rxnfc->fs.flow_type & FLOW_RSS) || rxnfc->rss_context != port_ctx) continue; ret = mvpp2_ethtool_cls_rule_del(port, rxnfc); if (ret) { netdev_warn(port->dev, "couldn't remove classification rule %d associated to this context", rxnfc->fs.location); } } kfree(priv->rss_tables[rss_ctx]); priv->rss_tables[rss_ctx] = NULL; port->rss_ctx[port_ctx] = -1; return 0; } int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 port_ctx, const u32 *indir) { int rss_ctx = mvpp22_rss_ctx(port, port_ctx); struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv, rss_ctx); if (!rss_table) return -EINVAL; memcpy(rss_table->indir, indir, MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0])); mvpp22_rss_fill_table(port, rss_table, rss_ctx); return 0; } int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 port_ctx, u32 *indir) { int rss_ctx = mvpp22_rss_ctx(port, port_ctx); struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv, rss_ctx); if (!rss_table) return -EINVAL; memcpy(indir, rss_table->indir, MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0])); return 0; } int 
mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info) { u16 hash_opts = 0; u32 flow_type; flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type); switch (flow_type) { case MVPP22_FLOW_TCP4: case MVPP22_FLOW_UDP4: case MVPP22_FLOW_TCP6: case MVPP22_FLOW_UDP6: if (info->data & RXH_L4_B_0_1) hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP; if (info->data & RXH_L4_B_2_3) hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP; fallthrough; case MVPP22_FLOW_IP4: case MVPP22_FLOW_IP6: if (info->data & RXH_L2DA) hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA; if (info->data & RXH_VLAN) hash_opts |= MVPP22_CLS_HEK_OPT_VLAN; if (info->data & RXH_L3_PROTO) hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO; if (info->data & RXH_IP_SRC) hash_opts |= (MVPP22_CLS_HEK_OPT_IP4SA | MVPP22_CLS_HEK_OPT_IP6SA); if (info->data & RXH_IP_DST) hash_opts |= (MVPP22_CLS_HEK_OPT_IP4DA | MVPP22_CLS_HEK_OPT_IP6DA); break; default: return -EOPNOTSUPP; } return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts); } int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info) { unsigned long hash_opts; u32 flow_type; int i; flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type); hash_opts = mvpp2_port_rss_hash_opts_get(port, flow_type); info->data = 0; for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) { switch (BIT(i)) { case MVPP22_CLS_HEK_OPT_MAC_DA: info->data |= RXH_L2DA; break; case MVPP22_CLS_HEK_OPT_VLAN: info->data |= RXH_VLAN; break; case MVPP22_CLS_HEK_OPT_L3_PROTO: info->data |= RXH_L3_PROTO; break; case MVPP22_CLS_HEK_OPT_IP4SA: case MVPP22_CLS_HEK_OPT_IP6SA: info->data |= RXH_IP_SRC; break; case MVPP22_CLS_HEK_OPT_IP4DA: case MVPP22_CLS_HEK_OPT_IP6DA: info->data |= RXH_IP_DST; break; case MVPP22_CLS_HEK_OPT_L4SIP: info->data |= RXH_L4_B_0_1; break; case MVPP22_CLS_HEK_OPT_L4DIP: info->data |= RXH_L4_B_2_3; break; default: return -EINVAL; } } return 0; } int mvpp22_port_rss_init(struct mvpp2_port *port) { struct mvpp2_rss_table *table; u32 context = 0; int i, ret; for (i = 0; i < MVPP22_N_RSS_TABLES; i++) port->rss_ctx[i] = -1; ret = mvpp22_rss_context_create(port, &context); if (ret) return ret; table = mvpp22_rss_table_get(port->priv, context); if (!table) return -EINVAL; port->rss_ctx[0] = context; /* Configure the first table to evenly distribute the packets across * real Rx Queues. The table entries map a hash to a port Rx Queue. */ for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) table->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs); mvpp22_rss_fill_table(port, table, mvpp22_rss_ctx(port, 0)); /* Configure default flows */ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP4, MVPP22_CLS_HEK_IP4_2T); mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP6, MVPP22_CLS_HEK_IP6_2T); mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP4, MVPP22_CLS_HEK_IP4_5T); mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP6, MVPP22_CLS_HEK_IP6_5T); mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP4, MVPP22_CLS_HEK_IP4_5T); mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP6, MVPP22_CLS_HEK_IP6_5T); return 0; }
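/*
 * Illustrative userspace sketch (not part of the driver source above): one
 * plausible way to exercise the ethtool RX classification path that ends up
 * in mvpp2_ethtool_cls_rule_ins(), assuming the standard ETHTOOL_SRXCLSRLINS
 * ioctl is routed to this driver through its ethtool_ops. The interface name
 * "eth0", the rule location and the target queue are arbitrary example
 * values, not anything the driver requires.
 */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;			/* insert an RX classification rule */
	nfc.fs.flow_type = TCP_V4_FLOW;			/* match TCP over IPv4 */
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);	/* destination port 80 */
	nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff);	/* exact match on the port */
	nfc.fs.ring_cookie = 2;				/* steer matches to RX queue 2 (example) */
	nfc.fs.location = 0;				/* example rule slot within the flow */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example interface name */
	ifr.ifr_data = (char *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXCLSRLINS");

	return 0;
}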
linux-master
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
// SPDX-License-Identifier: GPL-2.0 /* * Header Parser helpers for Marvell PPv2 Network Controller * * Copyright (C) 2014 Marvell * * Marcin Wojtas <[email protected]> */ #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> #include <uapi/linux/ppp_defs.h> #include <net/ip.h> #include <net/ipv6.h> #include "mvpp2.h" #include "mvpp2_prs.h" /* Update parser tcam and sram hw entries */ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) { int i; if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) return -EINVAL; /* Clear entry invalidation bit */ pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; /* Write sram index - indirect access */ mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]); /* Write tcam index - indirect access */ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]); return 0; } /* Initialize tcam entry from hw */ int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, int tid) { int i; if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1) return -EINVAL; memset(pe, 0, sizeof(*pe)); pe->index = tid; /* Write tcam index - indirect access */ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD)); if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) return MVPP2_PRS_TCAM_ENTRY_INVALID; for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i)); /* Write sram index - indirect access */ mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i)); return 0; } /* Invalidate tcam hw entry */ static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index) { /* Write index - indirect access */ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD), MVPP2_PRS_TCAM_INV_MASK); } /* Enable shadow table entry and set its lookup ID */ static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu) { priv->prs_shadow[index].valid = true; priv->prs_shadow[index].lu = lu; } /* Update ri fields in shadow table entry */ static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index, unsigned int ri, unsigned int ri_mask) { priv->prs_shadow[index].ri_mask = ri_mask; priv->prs_shadow[index].ri = ri; } /* Update lookup field in tcam sw entry */ static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu) { pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK); pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK); pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK); pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK); } /* Update mask for single port in tcam sw entry */ static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, unsigned int port, bool add) { if (add) pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port)); else pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port)); } /* Update port map in tcam sw entry */ static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, unsigned int ports) { 
pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK); pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK); pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK); } /* Obtain port map from tcam sw entry */ unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe) { return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK; } /* Set byte of data and its enable bits in tcam sw entry */ static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, unsigned int offs, unsigned char byte, unsigned char enable) { int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE; pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos); pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos); pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos; pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos); } /* Get byte of data and its enable bits from tcam sw entry */ void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, unsigned int offs, unsigned char *byte, unsigned char *enable) { int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE; *byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff; *enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff; } /* Compare tcam data bytes with a pattern */ static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs, u16 data) { u16 tcam_data; tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff; return tcam_data == data; } /* Update ai bits in tcam sw entry */ static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, unsigned int bits, unsigned int enable) { int i; for (i = 0; i < MVPP2_PRS_AI_BITS; i++) { if (!(enable & BIT(i))) continue; if (bits & BIT(i)) pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i); else pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i); } pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable); } /* Get ai bits from tcam sw entry */ static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe) { return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK; } /* Set ethertype in tcam sw entry */ static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset, unsigned short ethertype) { mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff); mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff); } /* Set vid in tcam sw entry */ static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset, unsigned short vid) { mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf); mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff); } /* Set bits in sram sw entry */ static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num, u32 val) { pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num))); } /* Clear bits in sram sw entry */ static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num, u32 val) { pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num))); } /* Update ri bits in sram sw entry */ static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, unsigned int bits, unsigned int mask) { unsigned int i; for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) { if (!(mask & BIT(i))) continue; if (bits & BIT(i)) mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i, 1); else mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_RI_OFFS + i, 1); mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 
1); } } /* Obtain ri bits from sram sw entry */ static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe) { return pe->sram[MVPP2_PRS_SRAM_RI_WORD]; } /* Update ai bits in sram sw entry */ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, unsigned int bits, unsigned int mask) { unsigned int i; for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) { if (!(mask & BIT(i))) continue; if (bits & BIT(i)) mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i, 1); else mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_AI_OFFS + i, 1); mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1); } } /* Read ai bits from sram sw entry */ static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe) { u8 bits; /* ai is stored on bits 90->97; so it spreads across two u32 */ int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS); int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS); bits = (pe->sram[ai_off] >> ai_shift) | (pe->sram[ai_off + 1] << (32 - ai_shift)); return bits; } /* In sram sw entry set lookup ID field of the tcam key to be used in the next * lookup interation */ static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu) { int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS; mvpp2_prs_sram_bits_clear(pe, sram_next_off, MVPP2_PRS_SRAM_NEXT_LU_MASK); mvpp2_prs_sram_bits_set(pe, sram_next_off, lu); } /* In the sram sw entry set sign and value of the next lookup offset * and the offset value generated to the classifier */ static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, unsigned int op) { /* Set sign */ if (shift < 0) { mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); shift = 0 - shift; } else { mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); } /* Set value */ pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |= shift & MVPP2_PRS_SRAM_SHIFT_MASK; /* Reset and set operation */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK); mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op); /* Set base offset as current */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); } /* In the sram sw entry set sign and value of the user defined offset * generated to the classifier */ static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, unsigned int type, int offset, unsigned int op) { /* Set sign */ if (offset < 0) { mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); offset = 0 - offset; } else { mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); } /* Set value */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS, MVPP2_PRS_SRAM_UDF_MASK); mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset & MVPP2_PRS_SRAM_UDF_MASK); /* Set offset type */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, MVPP2_PRS_SRAM_UDF_TYPE_MASK); mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type); /* Set offset operation */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); /* Set base offset as current */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); } /* Find parser flow entry */ static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) { struct mvpp2_prs_entry pe; int tid; /* Go through the all entires with MVPP2_PRS_LU_FLOWS */ for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) { u8 bits; if (!priv->prs_shadow[tid].valid 
|| priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) continue; mvpp2_prs_init_from_hw(priv, &pe, tid); bits = mvpp2_prs_sram_ai_get(&pe); /* Sram store classification lookup ID in AI bits [5:0] */ if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow) return tid; } return -ENOENT; } /* Return first free tcam index, seeking from start to end */ static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start, unsigned char end) { int tid; if (start > end) swap(start, end); for (tid = start; tid <= end; tid++) { if (!priv->prs_shadow[tid].valid) return tid; } return -EINVAL; } /* Drop flow control pause frames */ static void mvpp2_prs_drop_fc(struct mvpp2 *priv) { unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 }; struct mvpp2_prs_entry pe; unsigned int len; memset(&pe, 0, sizeof(pe)); /* For all ports - drop flow control frames */ pe.index = MVPP2_PE_FC_DROP; mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); /* Set match on DA */ len = ETH_ALEN; while (len--) mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, MVPP2_PRS_RI_DROP_MASK); mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); mvpp2_prs_hw_write(priv, &pe); } /* Enable/disable dropping all mac da's */ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) { struct mvpp2_prs_entry pe; if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { /* Entry exist - update port only */ mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); pe.index = MVPP2_PE_DROP_ALL; /* Non-promiscuous mode for all ports - DROP unknown packets */ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, MVPP2_PRS_RI_DROP_MASK); mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); /* Update shadow table */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); } /* Update port mask */ mvpp2_prs_tcam_port_set(&pe, port, add); mvpp2_prs_hw_write(priv, &pe); } /* Set port to unicast or multicast promiscuous mode */ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, enum mvpp2_prs_l2_cast l2_cast, bool add) { struct mvpp2_prs_entry pe; unsigned char cast_match; unsigned int ri; int tid; if (l2_cast == MVPP2_PRS_L2_UNI_CAST) { cast_match = MVPP2_PRS_UCAST_VAL; tid = MVPP2_PE_MAC_UC_PROMISCUOUS; ri = MVPP2_PRS_RI_L2_UCAST; } else { cast_match = MVPP2_PRS_MCAST_VAL; tid = MVPP2_PE_MAC_MC_PROMISCUOUS; ri = MVPP2_PRS_RI_L2_MCAST; } /* promiscuous mode - Accept unknown unicast or multicast packets */ if (priv->prs_shadow[tid].valid) { mvpp2_prs_init_from_hw(priv, &pe, tid); } else { memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); pe.index = tid; /* Continue - set next lookup */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); /* Set result info bits */ mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK); /* Match UC or MC addresses */ mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match, MVPP2_PRS_CAST_MASK); /* Shift to ethertype */ mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); /* Update 
shadow table */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); } /* Update port mask */ mvpp2_prs_tcam_port_set(&pe, port, add); mvpp2_prs_hw_write(priv, &pe); } /* Set entry for dsa packets */ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, bool tagged, bool extend) { struct mvpp2_prs_entry pe; int tid, shift; if (extend) { tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED; shift = 8; } else { tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED; shift = 4; } if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ mvpp2_prs_init_from_hw(priv, &pe, tid); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); pe.index = tid; /* Update shadow table */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); if (tagged) { /* Set tagged bit in DSA tag */ mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_TCAM_DSA_TAGGED_BIT, MVPP2_PRS_TCAM_DSA_TAGGED_BIT); /* Set ai bits for next iteration */ if (extend) mvpp2_prs_sram_ai_update(&pe, 1, MVPP2_PRS_SRAM_AI_MASK); else mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); /* Set result info bits to 'single vlan' */ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE, MVPP2_PRS_RI_VLAN_MASK); /* If packet is tagged continue check vid filtering */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); } else { /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag*/ mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Set result info bits to 'no vlans' */ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, MVPP2_PRS_RI_VLAN_MASK); mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); } /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); } /* Update port mask */ mvpp2_prs_tcam_port_set(&pe, port, add); mvpp2_prs_hw_write(priv, &pe); } /* Set entry for dsa ethertype */ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, bool add, bool tagged, bool extend) { struct mvpp2_prs_entry pe; int tid, shift, port_mask; if (extend) { tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED : MVPP2_PE_ETYPE_EDSA_UNTAGGED; port_mask = 0; shift = 8; } else { tid = tagged ? 
MVPP2_PE_ETYPE_DSA_TAGGED : MVPP2_PE_ETYPE_DSA_UNTAGGED; port_mask = MVPP2_PRS_PORT_MASK; shift = 4; } if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ mvpp2_prs_init_from_hw(priv, &pe, tid); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); pe.index = tid; /* Set ethertype */ mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA); mvpp2_prs_match_etype(&pe, 2, 0); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK, MVPP2_PRS_RI_DSA_MASK); /* Shift ethertype + 2 byte reserved + tag*/ mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Update shadow table */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); if (tagged) { /* Set tagged bit in DSA tag */ mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN + 2 + 3, MVPP2_PRS_TCAM_DSA_TAGGED_BIT, MVPP2_PRS_TCAM_DSA_TAGGED_BIT); /* Clear all ai bits for next iteration */ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); /* If packet is tagged continue check vlans */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); } else { /* Set result info bits to 'no vlans' */ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, MVPP2_PRS_RI_VLAN_MASK); mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); } /* Mask/unmask all ports, depending on dsa type */ mvpp2_prs_tcam_port_map_set(&pe, port_mask); } /* Update port mask */ mvpp2_prs_tcam_port_set(&pe, port, add); mvpp2_prs_hw_write(priv, &pe); } /* Search for existing single/triple vlan entry */ static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai) { struct mvpp2_prs_entry pe; int tid; /* Go through the all entries with MVPP2_PRS_LU_VLAN */ for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) { unsigned int ri_bits, ai_bits; bool match; if (!priv->prs_shadow[tid].valid || priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue; mvpp2_prs_init_from_hw(priv, &pe, tid); match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid); if (!match) continue; /* Get vlan type */ ri_bits = mvpp2_prs_sram_ri_get(&pe); ri_bits &= MVPP2_PRS_RI_VLAN_MASK; /* Get current ai value from tcam */ ai_bits = mvpp2_prs_tcam_ai_get(&pe); /* Clear double vlan bit */ ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT; if (ai != ai_bits) continue; if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) return tid; } return -ENOENT; } /* Add/update single/triple vlan entry */ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, unsigned int port_map) { struct mvpp2_prs_entry pe; int tid_aux, tid; int ret = 0; memset(&pe, 0, sizeof(pe)); tid = mvpp2_prs_vlan_find(priv, tpid, ai); if (tid < 0) { /* Create new tcam entry */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID); if (tid < 0) return tid; /* Get last double vlan tid */ for (tid_aux = MVPP2_PE_LAST_FREE_TID; tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) { unsigned int ri_bits; if (!priv->prs_shadow[tid_aux].valid || priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue; mvpp2_prs_init_from_hw(priv, &pe, tid_aux); ri_bits = mvpp2_prs_sram_ri_get(&pe); if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) == MVPP2_PRS_RI_VLAN_DOUBLE) break; } if (tid <= tid_aux) return -EINVAL; memset(&pe, 0, sizeof(pe)); pe.index = tid; mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); mvpp2_prs_match_etype(&pe, 0, tpid); /* VLAN tag detected, proceed with VID filtering */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); /* Clear all ai bits for next 
iteration */ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); if (ai == MVPP2_PRS_SINGLE_VLAN_AI) { mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE, MVPP2_PRS_RI_VLAN_MASK); } else { ai |= MVPP2_PRS_DBL_VLAN_AI_BIT; mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE, MVPP2_PRS_RI_VLAN_MASK); } mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK); mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); } else { mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Update ports' mask */ mvpp2_prs_tcam_port_map_set(&pe, port_map); mvpp2_prs_hw_write(priv, &pe); return ret; } /* Get first free double vlan ai number */ static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv) { int i; for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) { if (!priv->prs_double_vlans[i]) return i; } return -EINVAL; } /* Search for existing double vlan entry */ static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1, unsigned short tpid2) { struct mvpp2_prs_entry pe; int tid; /* Go through the all entries with MVPP2_PRS_LU_VLAN */ for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) { unsigned int ri_mask; bool match; if (!priv->prs_shadow[tid].valid || priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue; mvpp2_prs_init_from_hw(priv, &pe, tid); match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) && mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2); if (!match) continue; ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK; if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE) return tid; } return -ENOENT; } /* Add or update double vlan entry */ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, unsigned short tpid2, unsigned int port_map) { int tid_aux, tid, ai, ret = 0; struct mvpp2_prs_entry pe; memset(&pe, 0, sizeof(pe)); tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); if (tid < 0) { /* Create new tcam entry */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; /* Set ai value for new double vlan entry */ ai = mvpp2_prs_double_vlan_ai_free_get(priv); if (ai < 0) return ai; /* Get first single/triple vlan tid */ for (tid_aux = MVPP2_PE_FIRST_FREE_TID; tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) { unsigned int ri_bits; if (!priv->prs_shadow[tid_aux].valid || priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue; mvpp2_prs_init_from_hw(priv, &pe, tid_aux); ri_bits = mvpp2_prs_sram_ri_get(&pe); ri_bits &= MVPP2_PRS_RI_VLAN_MASK; if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) break; } if (tid >= tid_aux) return -ERANGE; memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); pe.index = tid; priv->prs_double_vlans[ai] = true; mvpp2_prs_match_etype(&pe, 0, tpid1); mvpp2_prs_match_etype(&pe, 4, tpid2); mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); /* Shift 4 bytes - skip outer vlan tag */ mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, MVPP2_PRS_RI_VLAN_MASK); mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT, MVPP2_PRS_SRAM_AI_MASK); mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); } else { mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Update ports' mask */ mvpp2_prs_tcam_port_map_set(&pe, port_map); mvpp2_prs_hw_write(priv, &pe); return ret; } /* IPv4 header parsing for fragmentation and L4 offset */ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, unsigned int ri, unsigned int 
ri_mask) { struct mvpp2_prs_entry pe; int tid; if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && (proto != IPPROTO_IGMP)) return -EINVAL; /* Not fragmented packet */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); pe.index = tid; /* Finished: go to flowid generation */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); /* Set L3 offset */ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L); mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK); mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK); mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, MVPP2_PRS_IPV4_DIP_AI_BIT); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); mvpp2_prs_hw_write(priv, &pe); /* Fragmented packet */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; pe.index = tid; /* Clear ri before updating */ pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0; pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0); mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); mvpp2_prs_hw_write(priv, &pe); return 0; } /* IPv4 L3 multicast or broadcast */ static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast) { struct mvpp2_prs_entry pe; int mask, tid; tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); pe.index = tid; switch (l3_cast) { case MVPP2_PRS_L3_MULTI_CAST: mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC, MVPP2_PRS_IPV4_MC_MASK); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, MVPP2_PRS_RI_L3_ADDR_MASK); break; case MVPP2_PRS_L3_BROAD_CAST: mask = MVPP2_PRS_IPV4_BC_MASK; mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask); mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask); mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask); mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST, MVPP2_PRS_RI_L3_ADDR_MASK); break; default: return -EINVAL; } /* Go again to ipv4 */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, MVPP2_PRS_IPV4_DIP_AI_BIT); /* Shift back to IPv4 proto */ mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); mvpp2_prs_hw_write(priv, &pe); return 0; } /* Set entries for protocols over IPv6 */ static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto, unsigned int ri, 
unsigned int ri_mask) { struct mvpp2_prs_entry pe; int tid; if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP)) return -EINVAL; tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); pe.index = tid; /* Finished: go to flowid generation */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, sizeof(struct ipv6hdr) - 6, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK); mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Write HW */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); mvpp2_prs_hw_write(priv, &pe); return 0; } /* IPv6 L3 multicast entry */ static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast) { struct mvpp2_prs_entry pe; int tid; if (l3_cast != MVPP2_PRS_L3_MULTI_CAST) return -EINVAL; tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); pe.index = tid; /* Finished: go to flowid generation */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, MVPP2_PRS_RI_L3_ADDR_MASK); mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); /* Shift back to IPv6 NH */ mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC, MVPP2_PRS_IPV6_MC_MASK); mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); mvpp2_prs_hw_write(priv, &pe); return 0; } /* Parser per-port initialization */ static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first, int lu_max, int offset) { u32 val; /* Set lookup ID */ val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG); val &= ~MVPP2_PRS_PORT_LU_MASK(port); val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first); mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val); /* Set maximum number of loops for packet received from port */ val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port)); val &= ~MVPP2_PRS_MAX_LOOP_MASK(port); val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max); mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val); /* Set initial offset for packet header extraction for the first * searching loop */ val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port)); val &= ~MVPP2_PRS_INIT_OFF_MASK(port); val |= MVPP2_PRS_INIT_OFF_VAL(port, offset); mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val); } /* Default flow entries initialization for all ports */ static void mvpp2_prs_def_flow_init(struct mvpp2 *priv) { struct mvpp2_prs_entry pe; int port; for (port = 0; port < MVPP2_MAX_PORTS; port++) { memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port; /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); /* Set flow ID*/ mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK); 
mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS); mvpp2_prs_hw_write(priv, &pe); } } /* Set default entry for Marvell Header field */ static void mvpp2_prs_mh_init(struct mvpp2 *priv) { struct mvpp2_prs_entry pe; memset(&pe, 0, sizeof(pe)); pe.index = MVPP2_PE_MH_DEFAULT; mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH); mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH); mvpp2_prs_hw_write(priv, &pe); /* Set MH entry that skip parser */ pe.index = MVPP2_PE_MH_SKIP_PRS; mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH); mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH); mvpp2_prs_hw_write(priv, &pe); } /* Set default entires (place holder) for promiscuous, non-promiscuous and * multicast MAC addresses */ static void mvpp2_prs_mac_init(struct mvpp2 *priv) { struct mvpp2_prs_entry pe; memset(&pe, 0, sizeof(pe)); /* Non-promiscuous mode for all ports - DROP unknown packets */ pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS; mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, MVPP2_PRS_RI_DROP_MASK); mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); mvpp2_prs_hw_write(priv, &pe); /* Create dummy entries for drop all and promiscuous modes */ mvpp2_prs_drop_fc(priv); mvpp2_prs_mac_drop_all_set(priv, 0, false); mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); } /* Set default entries for various types of dsa packets */ static void mvpp2_prs_dsa_init(struct mvpp2 *priv) { struct mvpp2_prs_entry pe; /* None tagged EDSA entry - place holder */ mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); /* Tagged EDSA entry - place holder */ mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); /* None tagged DSA entry - place holder */ mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); /* Tagged DSA entry - place holder */ mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); /* None tagged EDSA ethertype entry - place holder*/ mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); /* Tagged EDSA ethertype entry - place holder*/ mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); /* None tagged DSA ethertype entry */ mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); /* Tagged DSA ethertype entry */ mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); /* Set default entry, in case DSA or EDSA tag not found */ memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); pe.index = MVPP2_PE_DSA_DEFAULT; 
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); /* Shift 0 bytes */ mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); /* Clear all sram ai bits for next iteration */ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); mvpp2_prs_hw_write(priv, &pe); } /* Initialize parser entries for VID filtering */ static void mvpp2_prs_vid_init(struct mvpp2 *priv) { struct mvpp2_prs_entry pe; memset(&pe, 0, sizeof(pe)); /* Set default vid entry */ pe.index = MVPP2_PE_VID_FLTR_DEFAULT; mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT); /* Skip VLAN header - Set offset to 4 bytes */ mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Clear all ai bits for next iteration */ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); mvpp2_prs_hw_write(priv, &pe); /* Set default vid entry for extended DSA*/ memset(&pe, 0, sizeof(pe)); /* Set default vid entry */ pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT; mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT, MVPP2_PRS_EDSA_VID_AI_BIT); /* Skip VLAN header - Set offset to 8 bytes */ mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Clear all ai bits for next iteration */ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); mvpp2_prs_hw_write(priv, &pe); } /* Match basic ethertypes */ static int mvpp2_prs_etype_init(struct mvpp2 *priv) { struct mvpp2_prs_entry pe; int tid, ihl; /* Ethertype: PPPoE */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); pe.index = tid; mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES); mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK, MVPP2_PRS_RI_PPPOE_MASK); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; priv->prs_shadow[pe.index].finish = false; mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK, MVPP2_PRS_RI_PPPOE_MASK); mvpp2_prs_hw_write(priv, &pe); /* Ethertype: ARP */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); pe.index = tid; mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP); /* Generate flow in the next iteration*/ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP, MVPP2_PRS_RI_L3_PROTO_MASK); /* Set L3 offset */ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 
/* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; priv->prs_shadow[pe.index].finish = true; mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP, MVPP2_PRS_RI_L3_PROTO_MASK); mvpp2_prs_hw_write(priv, &pe); /* Ethertype: LBTD */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); pe.index = tid; mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE); /* Generate flow in the next iteration*/ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK); /* Set L3 offset */ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; priv->prs_shadow[pe.index].finish = true; mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK); mvpp2_prs_hw_write(priv, &pe); /* Ethertype: IPv4 with header length >= 5 */ for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) { tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); pe.index = tid; mvpp2_prs_match_etype(&pe, 0, ETH_P_IP); mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_IPV4_HEAD | ihl, MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK); mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK); /* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + sizeof(struct iphdr) - 4, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Set L4 offset */ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, MVPP2_ETH_TYPE_LEN + (ihl * 4), MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; priv->prs_shadow[pe.index].finish = false; mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK); mvpp2_prs_hw_write(priv, &pe); } /* Ethertype: IPv6 without options */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); pe.index = tid; mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6); /* Skip DIP of IPV6 header */ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 + MVPP2_MAX_L3_ADDR_SIZE, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, MVPP2_PRS_RI_L3_PROTO_MASK); /* Set L3 offset */ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; priv->prs_shadow[pe.index].finish = false; mvpp2_prs_shadow_ri_set(priv, pe.index, 
MVPP2_PRS_RI_L3_IP6, MVPP2_PRS_RI_L3_PROTO_MASK); mvpp2_prs_hw_write(priv, &pe); /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */ memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); pe.index = MVPP2_PE_ETH_TYPE_UN; /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Generate flow in the next iteration*/ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, MVPP2_PRS_RI_L3_PROTO_MASK); /* Set L3 offset even it's unknown L3 */ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; priv->prs_shadow[pe.index].finish = true; mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN, MVPP2_PRS_RI_L3_PROTO_MASK); mvpp2_prs_hw_write(priv, &pe); return 0; } /* Configure vlan entries and detect up to 2 successive VLAN tags. * Possible options: * 0x8100, 0x88A8 * 0x8100, 0x8100 * 0x8100 * 0x88A8 */ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) { struct mvpp2_prs_entry pe; int err; priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), MVPP2_PRS_DBL_VLANS_MAX, GFP_KERNEL); if (!priv->prs_double_vlans) return -ENOMEM; /* Double VLAN: 0x88A8, 0x8100 */ err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q, MVPP2_PRS_PORT_MASK); if (err) return err; /* Double VLAN: 0x8100, 0x8100 */ err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q, MVPP2_PRS_PORT_MASK); if (err) return err; /* Single VLAN: 0x88a8 */ err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI, MVPP2_PRS_PORT_MASK); if (err) return err; /* Single VLAN: 0x8100 */ err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI, MVPP2_PRS_PORT_MASK); if (err) return err; /* Set default double vlan entry */ memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); pe.index = MVPP2_PE_VLAN_DBL; mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); /* Clear ai for next iterations */ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, MVPP2_PRS_RI_VLAN_MASK); mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT, MVPP2_PRS_DBL_VLAN_AI_BIT); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); mvpp2_prs_hw_write(priv, &pe); /* Set default vlan none entry */ memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); pe.index = MVPP2_PE_VLAN_NONE; mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, MVPP2_PRS_RI_VLAN_MASK); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); mvpp2_prs_hw_write(priv, &pe); return 0; } /* Set entries for PPPoE ethertype */ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv) { struct mvpp2_prs_entry pe; int tid, ihl; /* IPv4 over PPPoE with header length >= 5 */ for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) { tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; memset(&pe, 0, 
sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); pe.index = tid; mvpp2_prs_match_etype(&pe, 0, PPP_IP); mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_IPV4_HEAD | ihl, MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK); mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK); /* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + sizeof(struct iphdr) - 4, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Set L3 offset */ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); /* Set L4 offset */ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, MVPP2_ETH_TYPE_LEN + (ihl * 4), MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); mvpp2_prs_hw_write(priv, &pe); } /* IPv6 over PPPoE */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); pe.index = tid; mvpp2_prs_match_etype(&pe, 0, PPP_IPV6); mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, MVPP2_PRS_RI_L3_PROTO_MASK); /* Jump to DIP of IPV6 header */ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 + MVPP2_MAX_L3_ADDR_SIZE, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Set L3 offset */ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); mvpp2_prs_hw_write(priv, &pe); /* Non-IP over PPPoE */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); pe.index = tid; mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, MVPP2_PRS_RI_L3_PROTO_MASK); /* Finished: go to flowid generation */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); /* Set L3 offset even if it's unknown L3 */ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); mvpp2_prs_hw_write(priv, &pe); return 0; } /* Initialize entries for IPv4 */ static int mvpp2_prs_ip4_init(struct mvpp2 *priv) { struct mvpp2_prs_entry pe; int err; /* Set entries for TCP, UDP and IGMP over IPv4 */ err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_RI_L4_PROTO_MASK); if (err) return err; err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_RI_L4_PROTO_MASK); if (err) return err; err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK); if (err) return err; /* IPv4 Broadcast */ err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST); if (err) return err; /* IPv4 Multicast */ err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST); if (err) return err; /* Default IPv4 entry for unknown protocols */ memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); pe.index = MVPP2_PE_IP4_PROTO_UN; /* Finished: go to flowid generation */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 
mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); /* Set L3 offset */ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, MVPP2_PRS_RI_L4_PROTO_MASK); mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, MVPP2_PRS_IPV4_DIP_AI_BIT); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); mvpp2_prs_hw_write(priv, &pe); /* Default IPv4 entry for unicast address */ memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); pe.index = MVPP2_PE_IP4_ADDR_UN; /* Go again to ipv4 */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, MVPP2_PRS_IPV4_DIP_AI_BIT); /* Shift back to IPv4 proto */ mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, MVPP2_PRS_RI_L3_ADDR_MASK); mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); mvpp2_prs_hw_write(priv, &pe); return 0; } /* Initialize entries for IPv6 */ static int mvpp2_prs_ip6_init(struct mvpp2 *priv) { struct mvpp2_prs_entry pe; int tid, err; /* Set entries for TCP, UDP and ICMP over IPv6 */ err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_RI_L4_PROTO_MASK); if (err) return err; err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_RI_L4_PROTO_MASK); if (err) return err; err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK); if (err) return err; /* IPv4 is the last header. 
This is similar case as 6-TCP or 17-UDP */ /* Result Info: UDF7=1, DS lite */ err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP, MVPP2_PRS_RI_UDF7_IP6_LITE, MVPP2_PRS_RI_UDF7_MASK); if (err) return err; /* IPv6 multicast */ err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST); if (err) return err; /* Entry for checking hop limit */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); pe.index = tid; /* Finished: go to flowid generation */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN | MVPP2_PRS_RI_DROP_MASK, MVPP2_PRS_RI_L3_PROTO_MASK | MVPP2_PRS_RI_DROP_MASK); mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK); mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); mvpp2_prs_hw_write(priv, &pe); /* Default IPv6 entry for unknown protocols */ memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); pe.index = MVPP2_PE_IP6_PROTO_UN; /* Finished: go to flowid generation */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, MVPP2_PRS_RI_L4_PROTO_MASK); /* Set L4 offset relatively to our current place */ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, sizeof(struct ipv6hdr) - 4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); mvpp2_prs_hw_write(priv, &pe); /* Default IPv6 entry for unknown ext protocols */ memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); pe.index = MVPP2_PE_IP6_EXT_PROTO_UN; /* Finished: go to flowid generation */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, MVPP2_PRS_RI_L4_PROTO_MASK); mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT, MVPP2_PRS_IPV6_EXT_AI_BIT); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); mvpp2_prs_hw_write(priv, &pe); /* Default IPv6 entry for unicast address */ memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); pe.index = MVPP2_PE_IP6_ADDR_UN; /* Finished: go to IPv6 again */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, MVPP2_PRS_RI_L3_ADDR_MASK); mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); /* Shift back to IPV6 NH */ mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); mvpp2_prs_hw_write(priv, &pe); return 0; } /* Find tcam entry with matched pair <vid,port> */ static int 
mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask) { unsigned char byte[2], enable[2]; struct mvpp2_prs_entry pe; u16 rvid, rmask; int tid; /* Go through the all entries with MVPP2_PRS_LU_VID */ for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { if (!port->priv->prs_shadow[tid].valid || port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) continue; mvpp2_prs_init_from_hw(port->priv, &pe, tid); mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); rvid = ((byte[0] & 0xf) << 8) + byte[1]; rmask = ((enable[0] & 0xf) << 8) + enable[1]; if (rvid != vid || rmask != mask) continue; return tid; } return -ENOENT; } /* Write parser entry for VID filtering */ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) { unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START + port->id * MVPP2_PRS_VLAN_FILT_MAX; unsigned int mask = 0xfff, reg_val, shift; struct mvpp2 *priv = port->priv; struct mvpp2_prs_entry pe; int tid; memset(&pe, 0, sizeof(pe)); /* Scan TCAM and see if entry with this <vid,port> already exist */ tid = mvpp2_prs_vid_range_find(port, vid, mask); reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); if (reg_val & MVPP2_DSA_EXTENDED) shift = MVPP2_VLAN_TAG_EDSA_LEN; else shift = MVPP2_VLAN_TAG_LEN; /* No such entry */ if (tid < 0) { /* Go through all entries from first to last in vlan range */ tid = mvpp2_prs_tcam_first_free(priv, vid_start, vid_start + MVPP2_PRS_VLAN_FILT_MAX_ENTRY); /* There isn't room for a new VID filter */ if (tid < 0) return tid; mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); pe.index = tid; /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); } else { mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Enable the current port */ mvpp2_prs_tcam_port_set(&pe, port->id, true); /* Continue - set next lookup */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); /* Skip VLAN header - Set offset to 4 or 8 bytes */ mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Set match on VID */ mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid); /* Clear all ai bits for next iteration */ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); /* Update shadow table */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); mvpp2_prs_hw_write(priv, &pe); return 0; } /* Write parser entry for VID filtering */ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid) { struct mvpp2 *priv = port->priv; int tid; /* Scan TCAM and see if entry with this <vid,port> already exist */ tid = mvpp2_prs_vid_range_find(port, vid, 0xfff); /* No such entry */ if (tid < 0) return; mvpp2_prs_hw_inv(priv, tid); priv->prs_shadow[tid].valid = false; } /* Remove all existing VID filters on this port */ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) { struct mvpp2 *priv = port->priv; int tid; for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { if (priv->prs_shadow[tid].valid) { mvpp2_prs_hw_inv(priv, tid); priv->prs_shadow[tid].valid = false; } } } /* Remove VID filering entry for this port */ void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port) { unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); struct mvpp2 *priv = port->priv; /* Invalidate the guard entry */ mvpp2_prs_hw_inv(priv, tid); priv->prs_shadow[tid].valid = false; } /* Add guard entry that drops packets when no VID is matched on this port */ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) { unsigned 
int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); struct mvpp2 *priv = port->priv; unsigned int reg_val, shift; struct mvpp2_prs_entry pe; if (priv->prs_shadow[tid].valid) return; memset(&pe, 0, sizeof(pe)); pe.index = tid; reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); if (reg_val & MVPP2_DSA_EXTENDED) shift = MVPP2_VLAN_TAG_EDSA_LEN; else shift = MVPP2_VLAN_TAG_LEN; mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); /* Update port mask */ mvpp2_prs_tcam_port_set(&pe, port->id, true); /* Continue - set next lookup */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); /* Skip VLAN header - Set offset to 4 or 8 bytes */ mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Drop VLAN packets that don't belong to any VIDs on this port */ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, MVPP2_PRS_RI_DROP_MASK); /* Clear all ai bits for next iteration */ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); /* Update shadow table */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); mvpp2_prs_hw_write(priv, &pe); } /* Parser default initialization */ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) { int err, index, i; /* Enable tcam table */ mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); /* Clear all tcam and sram entries */ for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) { mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0); mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index); for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0); } /* Invalidate all tcam entries */ for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) mvpp2_prs_hw_inv(priv, index); priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, sizeof(*priv->prs_shadow), GFP_KERNEL); if (!priv->prs_shadow) return -ENOMEM; /* Always start from lookup = 0 */ for (index = 0; index < MVPP2_MAX_PORTS; index++) mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, MVPP2_PRS_PORT_LU_MAX, 0); mvpp2_prs_def_flow_init(priv); mvpp2_prs_mh_init(priv); mvpp2_prs_mac_init(priv); mvpp2_prs_dsa_init(priv); mvpp2_prs_vid_init(priv); err = mvpp2_prs_etype_init(priv); if (err) return err; err = mvpp2_prs_vlan_init(pdev, priv); if (err) return err; err = mvpp2_prs_pppoe_init(priv); if (err) return err; err = mvpp2_prs_ip6_init(priv); if (err) return err; err = mvpp2_prs_ip4_init(priv); if (err) return err; return 0; } /* Compare MAC DA with tcam entry data */ static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, const u8 *da, unsigned char *mask) { unsigned char tcam_byte, tcam_mask; int index; for (index = 0; index < ETH_ALEN; index++) { mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask); if (tcam_mask != mask[index]) return false; if ((tcam_mask & tcam_byte) != (da[index] & mask[index])) return false; } return true; } /* Find tcam entry with matched pair <MAC DA, port> */ static int mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, unsigned char *mask, int udf_type) { struct mvpp2_prs_entry pe; int tid; /* Go through the all entires with MVPP2_PRS_LU_MAC */ for (tid = MVPP2_PE_MAC_RANGE_START; tid <= MVPP2_PE_MAC_RANGE_END; tid++) { unsigned int entry_pmap; if (!priv->prs_shadow[tid].valid || (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || (priv->prs_shadow[tid].udf != udf_type)) continue; mvpp2_prs_init_from_hw(priv, 
&pe, tid); entry_pmap = mvpp2_prs_tcam_port_map_get(&pe); if (mvpp2_prs_mac_range_equals(&pe, da, mask) && entry_pmap == pmap) return tid; } return -ENOENT; } /* Update parser's mac da entry */ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) { unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; struct mvpp2 *priv = port->priv; unsigned int pmap, len, ri; struct mvpp2_prs_entry pe; int tid; memset(&pe, 0, sizeof(pe)); /* Scan TCAM and see if entry with this <MAC DA, port> already exist */ tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask, MVPP2_PRS_UDF_MAC_DEF); /* No such entry */ if (tid < 0) { if (!add) return 0; /* Create new TCAM entry */ /* Go through the all entries from first to last */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_MAC_RANGE_START, MVPP2_PE_MAC_RANGE_END); if (tid < 0) return tid; pe.index = tid; /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); } else { mvpp2_prs_init_from_hw(priv, &pe, tid); } mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); /* Update port mask */ mvpp2_prs_tcam_port_set(&pe, port->id, add); /* Invalidate the entry if no ports are left enabled */ pmap = mvpp2_prs_tcam_port_map_get(&pe); if (pmap == 0) { if (add) return -EINVAL; mvpp2_prs_hw_inv(priv, pe.index); priv->prs_shadow[pe.index].valid = false; return 0; } /* Continue - set next lookup */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); /* Set match on DA */ len = ETH_ALEN; while (len--) mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff); /* Set result info bits */ if (is_broadcast_ether_addr(da)) { ri = MVPP2_PRS_RI_L2_BCAST; } else if (is_multicast_ether_addr(da)) { ri = MVPP2_PRS_RI_L2_MCAST; } else { ri = MVPP2_PRS_RI_L2_UCAST; if (ether_addr_equal(da, port->dev->dev_addr)) ri |= MVPP2_PRS_RI_MAC_ME_MASK; } mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK | MVPP2_PRS_RI_MAC_ME_MASK); mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK | MVPP2_PRS_RI_MAC_ME_MASK); /* Shift to ethertype */ mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Update shadow table and hw entry */ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF; mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); mvpp2_prs_hw_write(priv, &pe); return 0; } int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) { struct mvpp2_port *port = netdev_priv(dev); int err; /* Remove old parser entry */ err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false); if (err) return err; /* Add new parser entry */ err = mvpp2_prs_mac_da_accept(port, da, true); if (err) return err; /* Set addr in the device */ eth_hw_addr_set(dev, da); return 0; } void mvpp2_prs_mac_del_all(struct mvpp2_port *port) { struct mvpp2 *priv = port->priv; struct mvpp2_prs_entry pe; unsigned long pmap; int index, tid; for (tid = MVPP2_PE_MAC_RANGE_START; tid <= MVPP2_PE_MAC_RANGE_END; tid++) { unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; if (!priv->prs_shadow[tid].valid || (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)) continue; mvpp2_prs_init_from_hw(priv, &pe, tid); pmap = mvpp2_prs_tcam_port_map_get(&pe); /* We only want entries active on this port */ if (!test_bit(port->id, &pmap)) continue; /* Read mac addr from entry */ for (index = 0; index < ETH_ALEN; index++) mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index], &da_mask[index]); /* Special cases : Don't remove broadcast and port's own * address */ if (is_broadcast_ether_addr(da) || 
ether_addr_equal(da, port->dev->dev_addr)) continue; /* Remove entry from TCAM */ mvpp2_prs_mac_da_accept(port, da, false); } } int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) { switch (type) { case MVPP2_TAG_TYPE_EDSA: /* Add port to EDSA entries */ mvpp2_prs_dsa_tag_set(priv, port, true, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); mvpp2_prs_dsa_tag_set(priv, port, true, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); /* Remove port from DSA entries */ mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); break; case MVPP2_TAG_TYPE_DSA: /* Add port to DSA entries */ mvpp2_prs_dsa_tag_set(priv, port, true, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); mvpp2_prs_dsa_tag_set(priv, port, true, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); /* Remove port from EDSA entries */ mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); break; case MVPP2_TAG_TYPE_MH: case MVPP2_TAG_TYPE_NONE: /* Remove port form EDSA and DSA entries */ mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); break; default: if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA)) return -EINVAL; } return 0; } int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask) { struct mvpp2_prs_entry pe; u8 *ri_byte, *ri_byte_mask; int tid, i; memset(&pe, 0, sizeof(pe)); tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID); if (tid < 0) return tid; pe.index = tid; ri_byte = (u8 *)&ri; ri_byte_mask = (u8 *)&ri_mask; mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK); mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); for (i = 0; i < 4; i++) { mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i], ri_byte_mask[i]); } mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); mvpp2_prs_hw_write(priv, &pe); return 0; } /* Set prs flow for the port */ int mvpp2_prs_def_flow(struct mvpp2_port *port) { struct mvpp2_prs_entry pe; int tid; memset(&pe, 0, sizeof(pe)); tid = mvpp2_prs_flow_find(port->priv, port->id); /* Such entry not exist */ if (tid < 0) { /* Go through the all entires from last to first */ tid = mvpp2_prs_tcam_first_free(port->priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID); if (tid < 0) return tid; pe.index = tid; /* Set flow ID*/ mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK); mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); /* Update shadow table */ mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS); } else { mvpp2_prs_init_from_hw(port->priv, &pe, tid); } mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id)); mvpp2_prs_hw_write(port->priv, &pe); return 0; } int mvpp2_prs_hits(struct mvpp2 *priv, int index) { u32 val; if (index > MVPP2_PRS_TCAM_SRAM_SIZE) return -EINVAL; mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index); val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG); val &= MVPP2_PRS_TCAM_HIT_CNT_MASK; return val; }
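/* Illustrative sketch, not part of the driver: the 12-bit VID (and, from the
 * TCAM enable bytes, its mask) is recovered from data bytes 2 and 3 exactly as
 * done in mvpp2_prs_vid_range_find() above. The helper name is made up for
 * this example only.
 */
static inline unsigned short example_prs_vid_unpack(unsigned char byte2,
						    unsigned char byte3)
{
	/* byte 2 carries the upper nibble of the VID, byte 3 the low 8 bits */
	return ((byte2 & 0xf) << 8) + byte3;
}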
linux-master
drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
// SPDX-License-Identifier: GPL-2.0 /* * Driver for Marvell PPv2 network controller for Armada 375 SoC. * * Copyright (C) 2014 Marvell * * Marcin Wojtas <[email protected]> */ #include <linux/acpi.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> #include <linux/skbuff.h> #include <linux/inetdevice.h> #include <linux/mbus.h> #include <linux/module.h> #include <linux/mfd/syscon.h> #include <linux/interrupt.h> #include <linux/cpumask.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/of_address.h> #include <linux/phy.h> #include <linux/phylink.h> #include <linux/phy/phy.h> #include <linux/ptp_classify.h> #include <linux/clk.h> #include <linux/hrtimer.h> #include <linux/ktime.h> #include <linux/regmap.h> #include <uapi/linux/ppp_defs.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/page_pool/helpers.h> #include <net/tso.h> #include <linux/bpf_trace.h> #include "mvpp2.h" #include "mvpp2_prs.h" #include "mvpp2_cls.h" enum mvpp2_bm_pool_log_num { MVPP2_BM_SHORT, MVPP2_BM_LONG, MVPP2_BM_JUMBO, MVPP2_BM_POOLS_NUM }; static struct { int pkt_size; int buf_num; } mvpp2_pools[MVPP2_BM_POOLS_NUM]; /* The prototype is added here to be used in start_dev when using ACPI. This * will be removed once phylink is used for all modes (dt+ACPI). */ static void mvpp2_acpi_start(struct mvpp2_port *port); /* Queue modes */ #define MVPP2_QDIST_SINGLE_MODE 0 #define MVPP2_QDIST_MULTI_MODE 1 static int queue_mode = MVPP2_QDIST_MULTI_MODE; module_param(queue_mode, int, 0444); MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)"); /* Utility/helper methods */ void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data) { writel(data, priv->swth_base[0] + offset); } u32 mvpp2_read(struct mvpp2 *priv, u32 offset) { return readl(priv->swth_base[0] + offset); } static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) { return readl_relaxed(priv->swth_base[0] + offset); } static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu) { return cpu % priv->nthreads; } static void mvpp2_cm3_write(struct mvpp2 *priv, u32 offset, u32 data) { writel(data, priv->cm3_base + offset); } static u32 mvpp2_cm3_read(struct mvpp2 *priv, u32 offset) { return readl(priv->cm3_base + offset); } static struct page_pool * mvpp2_create_page_pool(struct device *dev, int num, int len, enum dma_data_direction dma_dir) { struct page_pool_params pp_params = { /* internal DMA mapping in page_pool */ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, .pool_size = num, .nid = NUMA_NO_NODE, .dev = dev, .dma_dir = dma_dir, .offset = MVPP2_SKB_HEADROOM, .max_len = len, }; return page_pool_create(&pp_params); } /* These accessors should be used to access: * * - per-thread registers, where each thread has its own copy of the * register. 
* * MVPP2_BM_VIRT_ALLOC_REG * MVPP2_BM_ADDR_HIGH_ALLOC * MVPP22_BM_ADDR_HIGH_RLS_REG * MVPP2_BM_VIRT_RLS_REG * MVPP2_ISR_RX_TX_CAUSE_REG * MVPP2_ISR_RX_TX_MASK_REG * MVPP2_TXQ_NUM_REG * MVPP2_AGGR_TXQ_UPDATE_REG * MVPP2_TXQ_RSVD_REQ_REG * MVPP2_TXQ_RSVD_RSLT_REG * MVPP2_TXQ_SENT_REG * MVPP2_RXQ_NUM_REG * * - global registers that must be accessed through a specific thread * window, because they are related to an access to a per-thread * register * * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG) * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG) * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG) * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG) * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG) * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG) * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG) * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG) * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG) * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG) * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG) * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) */ static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread, u32 offset, u32 data) { writel(data, priv->swth_base[thread] + offset); } static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread, u32 offset) { return readl(priv->swth_base[thread] + offset); } static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread, u32 offset, u32 data) { writel_relaxed(data, priv->swth_base[thread] + offset); } static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread, u32 offset) { return readl_relaxed(priv->swth_base[thread] + offset); } static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, struct mvpp2_tx_desc *tx_desc) { if (port->priv->hw_version == MVPP21) return le32_to_cpu(tx_desc->pp21.buf_dma_addr); else return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) & MVPP2_DESC_DMA_MASK; } static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, struct mvpp2_tx_desc *tx_desc, dma_addr_t dma_addr) { dma_addr_t addr, offset; addr = dma_addr & ~MVPP2_TX_DESC_ALIGN; offset = dma_addr & MVPP2_TX_DESC_ALIGN; if (port->priv->hw_version == MVPP21) { tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr); tx_desc->pp21.packet_offset = offset; } else { __le64 val = cpu_to_le64(addr); tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK); tx_desc->pp22.buf_dma_addr_ptp |= val; tx_desc->pp22.packet_offset = offset; } } static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port, struct mvpp2_tx_desc *tx_desc) { if (port->priv->hw_version == MVPP21) return le16_to_cpu(tx_desc->pp21.data_size); else return le16_to_cpu(tx_desc->pp22.data_size); } static void mvpp2_txdesc_size_set(struct mvpp2_port *port, struct mvpp2_tx_desc *tx_desc, size_t size) { if (port->priv->hw_version == MVPP21) tx_desc->pp21.data_size = cpu_to_le16(size); else tx_desc->pp22.data_size = cpu_to_le16(size); } static void mvpp2_txdesc_txq_set(struct mvpp2_port *port, struct mvpp2_tx_desc *tx_desc, unsigned int txq) { if (port->priv->hw_version == MVPP21) tx_desc->pp21.phys_txq = txq; else tx_desc->pp22.phys_txq = txq; } static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port, struct mvpp2_tx_desc *tx_desc, unsigned int command) { if (port->priv->hw_version == MVPP21) tx_desc->pp21.command = cpu_to_le32(command); else tx_desc->pp22.command = cpu_to_le32(command); } static unsigned int 
mvpp2_txdesc_offset_get(struct mvpp2_port *port, struct mvpp2_tx_desc *tx_desc) { if (port->priv->hw_version == MVPP21) return tx_desc->pp21.packet_offset; else return tx_desc->pp22.packet_offset; } static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { if (port->priv->hw_version == MVPP21) return le32_to_cpu(rx_desc->pp21.buf_dma_addr); else return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) & MVPP2_DESC_DMA_MASK; } static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { if (port->priv->hw_version == MVPP21) return le32_to_cpu(rx_desc->pp21.buf_cookie); else return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) & MVPP2_DESC_DMA_MASK; } static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { if (port->priv->hw_version == MVPP21) return le16_to_cpu(rx_desc->pp21.data_size); else return le16_to_cpu(rx_desc->pp22.data_size); } static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { if (port->priv->hw_version == MVPP21) return le32_to_cpu(rx_desc->pp21.status); else return le32_to_cpu(rx_desc->pp22.status); } static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) { txq_pcpu->txq_get_index++; if (txq_pcpu->txq_get_index == txq_pcpu->size) txq_pcpu->txq_get_index = 0; } static void mvpp2_txq_inc_put(struct mvpp2_port *port, struct mvpp2_txq_pcpu *txq_pcpu, void *data, struct mvpp2_tx_desc *tx_desc, enum mvpp2_tx_buf_type buf_type) { struct mvpp2_txq_pcpu_buf *tx_buf = txq_pcpu->buffs + txq_pcpu->txq_put_index; tx_buf->type = buf_type; if (buf_type == MVPP2_TYPE_SKB) tx_buf->skb = data; else tx_buf->xdpf = data; tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc); tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) + mvpp2_txdesc_offset_get(port, tx_desc); txq_pcpu->txq_put_index++; if (txq_pcpu->txq_put_index == txq_pcpu->size) txq_pcpu->txq_put_index = 0; } /* Get number of maximum RXQ */ static int mvpp2_get_nrxqs(struct mvpp2 *priv) { unsigned int nrxqs; if (priv->hw_version >= MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE) return 1; /* According to the PPv2.2 datasheet and our experiments on * PPv2.1, RX queues have an allocation granularity of 4 (when * more than a single one on PPv2.2). * Round up to nearest multiple of 4. 
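* For example, with 6 possible CPUs: (6 + 3) & ~0x3 = 8 RX queues; with 4 CPUs the value is already a multiple of 4 and stays 4.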
*/ nrxqs = (num_possible_cpus() + 3) & ~0x3; if (nrxqs > MVPP2_PORT_MAX_RXQ) nrxqs = MVPP2_PORT_MAX_RXQ; return nrxqs; } /* Get number of physical egress port */ static inline int mvpp2_egress_port(struct mvpp2_port *port) { return MVPP2_MAX_TCONT + port->id; } /* Get number of physical TXQ */ static inline int mvpp2_txq_phys(int port, int txq) { return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq; } /* Returns a struct page if page_pool is set, otherwise a buffer */ static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool, struct page_pool *page_pool) { if (page_pool) return page_pool_dev_alloc_pages(page_pool); if (likely(pool->frag_size <= PAGE_SIZE)) return netdev_alloc_frag(pool->frag_size); return kmalloc(pool->frag_size, GFP_ATOMIC); } static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, struct page_pool *page_pool, void *data) { if (page_pool) page_pool_put_full_page(page_pool, virt_to_head_page(data), false); else if (likely(pool->frag_size <= PAGE_SIZE)) skb_free_frag(data); else kfree(data); } /* Buffer Manager configuration routines */ /* Create pool */ static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool, int size) { u32 val; /* Number of buffer pointers must be a multiple of 16, as per * hardware constraints */ if (!IS_ALIGNED(size, 16)) return -EINVAL; /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 and PPv2.3 needs 16 * bytes per buffer pointer */ if (priv->hw_version == MVPP21) bm_pool->size_bytes = 2 * sizeof(u32) * size; else bm_pool->size_bytes = 2 * sizeof(u64) * size; bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes, &bm_pool->dma_addr, GFP_KERNEL); if (!bm_pool->virt_addr) return -ENOMEM; if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) { dma_free_coherent(dev, bm_pool->size_bytes, bm_pool->virt_addr, bm_pool->dma_addr); dev_err(dev, "BM pool %d is not %d bytes aligned\n", bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN); return -ENOMEM; } mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id), lower_32_bits(bm_pool->dma_addr)); mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size); val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); val |= MVPP2_BM_START_MASK; val &= ~MVPP2_BM_LOW_THRESH_MASK; val &= ~MVPP2_BM_HIGH_THRESH_MASK; /* Set 8 Pools BPPI threshold for MVPP23 */ if (priv->hw_version == MVPP23) { val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP23_BM_BPPI_LOW_THRESH); val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP23_BM_BPPI_HIGH_THRESH); } else { val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP2_BM_BPPI_LOW_THRESH); val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP2_BM_BPPI_HIGH_THRESH); } mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); bm_pool->size = size; bm_pool->pkt_size = 0; bm_pool->buf_num = 0; return 0; } /* Set pool buffer size */ static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool, int buf_size) { u32 val; bm_pool->buf_size = buf_size; val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET); mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val); } static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool, dma_addr_t *dma_addr, phys_addr_t *phys_addr) { unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu()); *dma_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); *phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG); if (priv->hw_version >= MVPP22) { u32 val; u32 dma_addr_highbits, phys_addr_highbits; val = 
mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC); dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK); phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >> MVPP22_BM_ADDR_HIGH_VIRT_SHIFT; if (sizeof(dma_addr_t) == 8) *dma_addr |= (u64)dma_addr_highbits << 32; if (sizeof(phys_addr_t) == 8) *phys_addr |= (u64)phys_addr_highbits << 32; } put_cpu(); } /* Free all buffers from the pool */ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool, int buf_num) { struct page_pool *pp = NULL; int i; if (buf_num > bm_pool->buf_num) { WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n", bm_pool->id, buf_num); buf_num = bm_pool->buf_num; } if (priv->percpu_pools) pp = priv->page_pool[bm_pool->id]; for (i = 0; i < buf_num; i++) { dma_addr_t buf_dma_addr; phys_addr_t buf_phys_addr; void *data; mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool, &buf_dma_addr, &buf_phys_addr); if (!pp) dma_unmap_single(dev, buf_dma_addr, bm_pool->buf_size, DMA_FROM_DEVICE); data = (void *)phys_to_virt(buf_phys_addr); if (!data) break; mvpp2_frag_free(bm_pool, pp, data); } /* Update BM driver with number of buffers removed from pool */ bm_pool->buf_num -= i; } /* Check number of buffers in BM pool */ static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool) { int buf_num = 0; buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) & MVPP22_BM_POOL_PTRS_NUM_MASK; buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) & MVPP2_BM_BPPI_PTR_NUM_MASK; /* HW has one buffer ready which is not reflected in the counters */ if (buf_num) buf_num += 1; return buf_num; } /* Cleanup pool */ static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool) { int buf_num; u32 val; buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num); /* Check buffer counters after free */ buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); if (buf_num) { WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n", bm_pool->id, bm_pool->buf_num); return 0; } val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); val |= MVPP2_BM_STOP_MASK; mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); if (priv->percpu_pools) { page_pool_destroy(priv->page_pool[bm_pool->id]); priv->page_pool[bm_pool->id] = NULL; } dma_free_coherent(dev, bm_pool->size_bytes, bm_pool->virt_addr, bm_pool->dma_addr); return 0; } static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv) { int i, err, size, poolnum = MVPP2_BM_POOLS_NUM; struct mvpp2_bm_pool *bm_pool; if (priv->percpu_pools) poolnum = mvpp2_get_nrxqs(priv) * 2; /* Create all pools with maximum size */ size = MVPP2_BM_POOL_SIZE_MAX; for (i = 0; i < poolnum; i++) { bm_pool = &priv->bm_pools[i]; bm_pool->id = i; err = mvpp2_bm_pool_create(dev, priv, bm_pool, size); if (err) goto err_unroll_pools; mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0); } return 0; err_unroll_pools: dev_err(dev, "failed to create BM pool %d, size %d\n", i, size); for (i = i - 1; i >= 0; i--) mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]); return err; } /* Routine enable PPv23 8 pool mode */ static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv) { int val; val = mvpp2_read(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG); val |= MVPP23_BM_8POOL_MODE; mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val); } static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv) { enum dma_data_direction dma_dir = DMA_FROM_DEVICE; 
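/* Buffers are normally mapped DMA_FROM_DEVICE; if any port has an XDP program attached, the loop below switches to DMA_BIDIRECTIONAL, since XDP_TX/XDP_REDIRECT transmit straight from the RX buffer and the device must then both write and read it. */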
int i, err, poolnum = MVPP2_BM_POOLS_NUM; struct mvpp2_port *port; if (priv->percpu_pools) { for (i = 0; i < priv->port_count; i++) { port = priv->port_list[i]; if (port->xdp_prog) { dma_dir = DMA_BIDIRECTIONAL; break; } } poolnum = mvpp2_get_nrxqs(priv) * 2; for (i = 0; i < poolnum; i++) { /* the pool in use */ int pn = i / (poolnum / 2); priv->page_pool[i] = mvpp2_create_page_pool(dev, mvpp2_pools[pn].buf_num, mvpp2_pools[pn].pkt_size, dma_dir); if (IS_ERR(priv->page_pool[i])) { int j; for (j = 0; j < i; j++) { page_pool_destroy(priv->page_pool[j]); priv->page_pool[j] = NULL; } return PTR_ERR(priv->page_pool[i]); } } } dev_info(dev, "using %d %s buffers\n", poolnum, priv->percpu_pools ? "per-cpu" : "shared"); for (i = 0; i < poolnum; i++) { /* Mask BM all interrupts */ mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0); /* Clear BM cause register */ mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0); } /* Allocate and initialize BM pools */ priv->bm_pools = devm_kcalloc(dev, poolnum, sizeof(*priv->bm_pools), GFP_KERNEL); if (!priv->bm_pools) return -ENOMEM; if (priv->hw_version == MVPP23) mvpp23_bm_set_8pool_mode(priv); err = mvpp2_bm_pools_init(dev, priv); if (err < 0) return err; return 0; } static void mvpp2_setup_bm_pool(void) { /* Short pool */ mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM; mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE; /* Long pool */ mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM; mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE; /* Jumbo pool */ mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM; mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE; } /* Attach long pool to rxq */ static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port, int lrxq, int long_pool) { u32 val, mask; int prxq; /* Get queue physical ID */ prxq = port->rxqs[lrxq]->id; if (port->priv->hw_version == MVPP21) mask = MVPP21_RXQ_POOL_LONG_MASK; else mask = MVPP22_RXQ_POOL_LONG_MASK; val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); val &= ~mask; val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask; mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); } /* Attach short pool to rxq */ static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port, int lrxq, int short_pool) { u32 val, mask; int prxq; /* Get queue physical ID */ prxq = port->rxqs[lrxq]->id; if (port->priv->hw_version == MVPP21) mask = MVPP21_RXQ_POOL_SHORT_MASK; else mask = MVPP22_RXQ_POOL_SHORT_MASK; val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); val &= ~mask; val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask; mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); } static void *mvpp2_buf_alloc(struct mvpp2_port *port, struct mvpp2_bm_pool *bm_pool, struct page_pool *page_pool, dma_addr_t *buf_dma_addr, phys_addr_t *buf_phys_addr, gfp_t gfp_mask) { dma_addr_t dma_addr; struct page *page; void *data; data = mvpp2_frag_alloc(bm_pool, page_pool); if (!data) return NULL; if (page_pool) { page = (struct page *)data; dma_addr = page_pool_get_dma_addr(page); data = page_to_virt(page); } else { dma_addr = dma_map_single(port->dev->dev.parent, data, MVPP2_RX_BUF_SIZE(bm_pool->pkt_size), DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { mvpp2_frag_free(bm_pool, NULL, data); return NULL; } } *buf_dma_addr = dma_addr; *buf_phys_addr = virt_to_phys(data); return data; } /* Routine enable flow control for RXQs condition */ static void mvpp2_rxq_enable_fc(struct mvpp2_port *port) { int val, cm3_state, 
host_id, q; int fq = port->first_rxq; unsigned long flags; spin_lock_irqsave(&port->priv->mss_spinlock, flags); /* Remove Flow control enable bit to prevent race between FW and Kernel * If Flow control was enabled, it would be re-enabled. */ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG); cm3_state = (val & FLOW_CONTROL_ENABLE_BIT); val &= ~FLOW_CONTROL_ENABLE_BIT; mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val); /* Set same Flow control for all RXQs */ for (q = 0; q < port->nrxqs; q++) { /* Set stop and start Flow control RXQ thresholds */ val = MSS_THRESHOLD_START; val |= (MSS_THRESHOLD_STOP << MSS_RXQ_TRESH_STOP_OFFS); mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val); val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq)); /* Set RXQ port ID */ val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq)); val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq)); val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq) + MSS_RXQ_ASS_HOSTID_OFFS)); /* Calculate RXQ host ID: * In Single queue mode: Host ID equal to Host ID used for * shared RX interrupt * In Multi queue mode: Host ID equal to number of * RXQ ID / number of CoS queues * In Single resource mode: Host ID always equal to 0 */ if (queue_mode == MVPP2_QDIST_SINGLE_MODE) host_id = port->nqvecs; else if (queue_mode == MVPP2_QDIST_MULTI_MODE) host_id = q; else host_id = 0; /* Set RXQ host ID */ val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq) + MSS_RXQ_ASS_HOSTID_OFFS)); mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val); } /* Notify Firmware that Flow control config space ready for update */ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG); val |= FLOW_CONTROL_UPDATE_COMMAND_BIT; val |= cm3_state; mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val); spin_unlock_irqrestore(&port->priv->mss_spinlock, flags); } /* Routine disable flow control for RXQs condition */ static void mvpp2_rxq_disable_fc(struct mvpp2_port *port) { int val, cm3_state, q; unsigned long flags; int fq = port->first_rxq; spin_lock_irqsave(&port->priv->mss_spinlock, flags); /* Remove Flow control enable bit to prevent race between FW and Kernel * If Flow control was enabled, it would be re-enabled. */ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG); cm3_state = (val & FLOW_CONTROL_ENABLE_BIT); val &= ~FLOW_CONTROL_ENABLE_BIT; mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val); /* Disable Flow control for all RXQs */ for (q = 0; q < port->nrxqs; q++) { /* Set threshold 0 to disable Flow control */ val = 0; val |= (0 << MSS_RXQ_TRESH_STOP_OFFS); mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val); val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq)); val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq)); val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq) + MSS_RXQ_ASS_HOSTID_OFFS)); mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val); } /* Notify Firmware that Flow control config space ready for update */ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG); val |= FLOW_CONTROL_UPDATE_COMMAND_BIT; val |= cm3_state; mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val); spin_unlock_irqrestore(&port->priv->mss_spinlock, flags); } /* Routine disable/enable flow control for BM pool condition */ static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port, struct mvpp2_bm_pool *pool, bool en) { int val, cm3_state; unsigned long flags; spin_lock_irqsave(&port->priv->mss_spinlock, flags); /* Remove Flow control enable bit to prevent race between FW and Kernel * If Flow control were enabled, it would be re-enabled. 
*/ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG); cm3_state = (val & FLOW_CONTROL_ENABLE_BIT); val &= ~FLOW_CONTROL_ENABLE_BIT; mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val); /* Check if BM pool should be enabled/disable */ if (en) { /* Set BM pool start and stop thresholds per port */ val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id)); val |= MSS_BUF_POOL_PORT_OFFS(port->id); val &= ~MSS_BUF_POOL_START_MASK; val |= (MSS_THRESHOLD_START << MSS_BUF_POOL_START_OFFS); val &= ~MSS_BUF_POOL_STOP_MASK; val |= MSS_THRESHOLD_STOP; mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val); } else { /* Remove BM pool from the port */ val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id)); val &= ~MSS_BUF_POOL_PORT_OFFS(port->id); /* Zero BM pool start and stop thresholds to disable pool * flow control if pool empty (not used by any port) */ if (!pool->buf_num) { val &= ~MSS_BUF_POOL_START_MASK; val &= ~MSS_BUF_POOL_STOP_MASK; } mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val); } /* Notify Firmware that Flow control config space ready for update */ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG); val |= FLOW_CONTROL_UPDATE_COMMAND_BIT; val |= cm3_state; mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val); spin_unlock_irqrestore(&port->priv->mss_spinlock, flags); } /* disable/enable flow control for BM pool on all ports */ static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en) { struct mvpp2_port *port; int i; for (i = 0; i < priv->port_count; i++) { port = priv->port_list[i]; if (port->priv->percpu_pools) { for (i = 0; i < port->nrxqs; i++) mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i], port->tx_fc & en); } else { mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en); mvpp2_bm_pool_update_fc(port, port->pool_short, port->tx_fc & en); } } } static int mvpp2_enable_global_fc(struct mvpp2 *priv) { int val, timeout = 0; /* Enable global flow control. In this stage global * flow control enabled, but still disabled per port. */ val = mvpp2_cm3_read(priv, MSS_FC_COM_REG); val |= FLOW_CONTROL_ENABLE_BIT; mvpp2_cm3_write(priv, MSS_FC_COM_REG, val); /* Check if Firmware running and disable FC if not*/ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT; mvpp2_cm3_write(priv, MSS_FC_COM_REG, val); while (timeout < MSS_FC_MAX_TIMEOUT) { val = mvpp2_cm3_read(priv, MSS_FC_COM_REG); if (!(val & FLOW_CONTROL_UPDATE_COMMAND_BIT)) return 0; usleep_range(10, 20); timeout++; } priv->global_tx_fc = false; return -EOPNOTSUPP; } /* Release buffer to BM */ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, dma_addr_t buf_dma_addr, phys_addr_t buf_phys_addr) { unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); unsigned long flags = 0; if (test_bit(thread, &port->priv->lock_map)) spin_lock_irqsave(&port->bm_lock[thread], flags); if (port->priv->hw_version >= MVPP22) { u32 val = 0; if (sizeof(dma_addr_t) == 8) val |= upper_32_bits(buf_dma_addr) & MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK; if (sizeof(phys_addr_t) == 8) val |= (upper_32_bits(buf_phys_addr) << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) & MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK; mvpp2_thread_write_relaxed(port->priv, thread, MVPP22_BM_ADDR_HIGH_RLS_REG, val); } /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply * returned in the "cookie" field of the RX * descriptor. 
Instead of storing the virtual address, we * store the physical address */ mvpp2_thread_write_relaxed(port->priv, thread, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); mvpp2_thread_write_relaxed(port->priv, thread, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); if (test_bit(thread, &port->priv->lock_map)) spin_unlock_irqrestore(&port->bm_lock[thread], flags); put_cpu(); } /* Allocate buffers for the pool */ static int mvpp2_bm_bufs_add(struct mvpp2_port *port, struct mvpp2_bm_pool *bm_pool, int buf_num) { int i, buf_size, total_size; dma_addr_t dma_addr; phys_addr_t phys_addr; struct page_pool *pp = NULL; void *buf; if (port->priv->percpu_pools && bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) { netdev_err(port->dev, "attempted to use jumbo frames with per-cpu pools"); return 0; } buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size); total_size = MVPP2_RX_TOTAL_SIZE(buf_size); if (buf_num < 0 || (buf_num + bm_pool->buf_num > bm_pool->size)) { netdev_err(port->dev, "cannot allocate %d buffers for pool %d\n", buf_num, bm_pool->id); return 0; } if (port->priv->percpu_pools) pp = port->priv->page_pool[bm_pool->id]; for (i = 0; i < buf_num; i++) { buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr, &phys_addr, GFP_KERNEL); if (!buf) break; mvpp2_bm_pool_put(port, bm_pool->id, dma_addr, phys_addr); } /* Update BM driver with number of buffers added to pool */ bm_pool->buf_num += i; netdev_dbg(port->dev, "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n", bm_pool->id, bm_pool->pkt_size, buf_size, total_size); netdev_dbg(port->dev, "pool %d: %d of %d buffers added\n", bm_pool->id, i, buf_num); return i; } /* Notify the driver that BM pool is being used as specific type and return the * pool pointer on success */ static struct mvpp2_bm_pool * mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size) { struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; int num; if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) || (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) { netdev_err(port->dev, "Invalid pool %d\n", pool); return NULL; } /* Allocate buffers in case BM pool is used as long pool, but packet * size doesn't match MTU or BM pool hasn't being used yet */ if (new_pool->pkt_size == 0) { int pkts_num; /* Set default buffer number or free all the buffers in case * the pool is not empty */ pkts_num = new_pool->buf_num; if (pkts_num == 0) { if (port->priv->percpu_pools) { if (pool < port->nrxqs) pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num; else pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num; } else { pkts_num = mvpp2_pools[pool].buf_num; } } else { mvpp2_bm_bufs_free(port->dev->dev.parent, port->priv, new_pool, pkts_num); } new_pool->pkt_size = pkt_size; new_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) + MVPP2_SKB_SHINFO_SIZE; /* Allocate buffers for this pool */ num = mvpp2_bm_bufs_add(port, new_pool, pkts_num); if (num != pkts_num) { WARN(1, "pool %d: %d of %d allocated\n", new_pool->id, num, pkts_num); return NULL; } } mvpp2_bm_pool_bufsize_set(port->priv, new_pool, MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); return new_pool; } static struct mvpp2_bm_pool * mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type, unsigned int pool, int pkt_size) { struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; int num; if (pool > port->nrxqs * 2) { netdev_err(port->dev, "Invalid pool %d\n", pool); return NULL; } /* Allocate buffers in case BM pool is used as long pool, but packet * size doesn't match MTU or BM pool hasn't being used yet */ if 
(new_pool->pkt_size == 0) { int pkts_num; /* Set default buffer number or free all the buffers in case * the pool is not empty */ pkts_num = new_pool->buf_num; if (pkts_num == 0) pkts_num = mvpp2_pools[type].buf_num; else mvpp2_bm_bufs_free(port->dev->dev.parent, port->priv, new_pool, pkts_num); new_pool->pkt_size = pkt_size; new_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) + MVPP2_SKB_SHINFO_SIZE; /* Allocate buffers for this pool */ num = mvpp2_bm_bufs_add(port, new_pool, pkts_num); if (num != pkts_num) { WARN(1, "pool %d: %d of %d allocated\n", new_pool->id, num, pkts_num); return NULL; } } mvpp2_bm_pool_bufsize_set(port->priv, new_pool, MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); return new_pool; } /* Initialize pools for swf, shared buffers variant */ static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port) { enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool; int rxq; /* If port pkt_size is higher than 1518B: * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool */ if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) { long_log_pool = MVPP2_BM_JUMBO; short_log_pool = MVPP2_BM_LONG; } else { long_log_pool = MVPP2_BM_LONG; short_log_pool = MVPP2_BM_SHORT; } if (!port->pool_long) { port->pool_long = mvpp2_bm_pool_use(port, long_log_pool, mvpp2_pools[long_log_pool].pkt_size); if (!port->pool_long) return -ENOMEM; port->pool_long->port_map |= BIT(port->id); for (rxq = 0; rxq < port->nrxqs; rxq++) mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); } if (!port->pool_short) { port->pool_short = mvpp2_bm_pool_use(port, short_log_pool, mvpp2_pools[short_log_pool].pkt_size); if (!port->pool_short) return -ENOMEM; port->pool_short->port_map |= BIT(port->id); for (rxq = 0; rxq < port->nrxqs; rxq++) mvpp2_rxq_short_pool_set(port, rxq, port->pool_short->id); } return 0; } /* Initialize pools for swf, percpu buffers variant */ static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port) { struct mvpp2_bm_pool *bm_pool; int i; for (i = 0; i < port->nrxqs; i++) { bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i, mvpp2_pools[MVPP2_BM_SHORT].pkt_size); if (!bm_pool) return -ENOMEM; bm_pool->port_map |= BIT(port->id); mvpp2_rxq_short_pool_set(port, i, bm_pool->id); } for (i = 0; i < port->nrxqs; i++) { bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs, mvpp2_pools[MVPP2_BM_LONG].pkt_size); if (!bm_pool) return -ENOMEM; bm_pool->port_map |= BIT(port->id); mvpp2_rxq_long_pool_set(port, i, bm_pool->id); } port->pool_long = NULL; port->pool_short = NULL; return 0; } static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) { if (port->priv->percpu_pools) return mvpp2_swf_bm_pool_init_percpu(port); else return mvpp2_swf_bm_pool_init_shared(port); } static void mvpp2_set_hw_csum(struct mvpp2_port *port, enum mvpp2_bm_pool_log_num new_long_pool) { const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; /* Update L4 checksum when jumbo enable/disable on port. * Only port 0 supports hardware checksum offload due to * the Tx FIFO size limitation. * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor * has 7 bits, so the maximum L3 offset is 128. 
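* As a result, ports other than port 0 lose NETIF_F_IP_CSUM / NETIF_F_IPV6_CSUM whenever the jumbo pool is selected as their long pool.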
*/ if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) { port->dev->features &= ~csums; port->dev->hw_features &= ~csums; } else { port->dev->features |= csums; port->dev->hw_features |= csums; } } static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) { struct mvpp2_port *port = netdev_priv(dev); enum mvpp2_bm_pool_log_num new_long_pool; int pkt_size = MVPP2_RX_PKT_SIZE(mtu); if (port->priv->percpu_pools) goto out_set; /* If port MTU is higher than 1518B: * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool */ if (pkt_size > MVPP2_BM_LONG_PKT_SIZE) new_long_pool = MVPP2_BM_JUMBO; else new_long_pool = MVPP2_BM_LONG; if (new_long_pool != port->pool_long->id) { if (port->tx_fc) { if (pkt_size > MVPP2_BM_LONG_PKT_SIZE) mvpp2_bm_pool_update_fc(port, port->pool_short, false); else mvpp2_bm_pool_update_fc(port, port->pool_long, false); } /* Remove port from old short & long pool */ port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id, port->pool_long->pkt_size); port->pool_long->port_map &= ~BIT(port->id); port->pool_long = NULL; port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id, port->pool_short->pkt_size); port->pool_short->port_map &= ~BIT(port->id); port->pool_short = NULL; port->pkt_size = pkt_size; /* Add port to new short & long pool */ mvpp2_swf_bm_pool_init(port); mvpp2_set_hw_csum(port, new_long_pool); if (port->tx_fc) { if (pkt_size > MVPP2_BM_LONG_PKT_SIZE) mvpp2_bm_pool_update_fc(port, port->pool_long, true); else mvpp2_bm_pool_update_fc(port, port->pool_short, true); } /* Update L4 checksum when jumbo enable/disable on port */ if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) { dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); } else { dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; } } out_set: dev->mtu = mtu; dev->wanted_features = dev->features; netdev_update_features(dev); return 0; } static inline void mvpp2_interrupts_enable(struct mvpp2_port *port) { int i, sw_thread_mask = 0; for (i = 0; i < port->nqvecs; i++) sw_thread_mask |= port->qvecs[i].sw_thread_mask; mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask)); } static inline void mvpp2_interrupts_disable(struct mvpp2_port *port) { int i, sw_thread_mask = 0; for (i = 0; i < port->nqvecs; i++) sw_thread_mask |= port->qvecs[i].sw_thread_mask; mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask)); } static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec) { struct mvpp2_port *port = qvec->port; mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask)); } static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec) { struct mvpp2_port *port = qvec->port; mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask)); } /* Mask the current thread's Rx/Tx interrupts * Called by on_each_cpu(), guaranteed to run with migration disabled, * using smp_processor_id() is OK. 
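* The cpu number is mapped to its software thread with mvpp2_cpu_to_thread() (i.e. cpu % nthreads) before indexing the per-thread register window.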
*/ static void mvpp2_interrupts_mask(void *arg) { struct mvpp2_port *port = arg; int cpu = smp_processor_id(); u32 thread; /* If the thread isn't used, don't do anything */ if (cpu > port->priv->nthreads) return; thread = mvpp2_cpu_to_thread(port->priv, cpu); mvpp2_thread_write(port->priv, thread, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0); mvpp2_thread_write(port->priv, thread, MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), 0); } /* Unmask the current thread's Rx/Tx interrupts. * Called by on_each_cpu(), guaranteed to run with migration disabled, * using smp_processor_id() is OK. */ static void mvpp2_interrupts_unmask(void *arg) { struct mvpp2_port *port = arg; int cpu = smp_processor_id(); u32 val, thread; /* If the thread isn't used, don't do anything */ if (cpu >= port->priv->nthreads) return; thread = mvpp2_cpu_to_thread(port->priv, cpu); val = MVPP2_CAUSE_MISC_SUM_MASK | MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); if (port->has_tx_irqs) val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; mvpp2_thread_write(port->priv, thread, MVPP2_ISR_RX_TX_MASK_REG(port->id), val); mvpp2_thread_write(port->priv, thread, MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK); } static void mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) { u32 val; int i; if (port->priv->hw_version == MVPP21) return; if (mask) val = 0; else val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22); for (i = 0; i < port->nqvecs; i++) { struct mvpp2_queue_vector *v = port->qvecs + i; if (v->type != MVPP2_QUEUE_VECTOR_SHARED) continue; mvpp2_thread_write(port->priv, v->sw_thread_id, MVPP2_ISR_RX_TX_MASK_REG(port->id), val); mvpp2_thread_write(port->priv, v->sw_thread_id, MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK); } } /* Only GOP port 0 has an XLG MAC */ static bool mvpp2_port_supports_xlg(struct mvpp2_port *port) { return port->gop_id == 0; } static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port) { return !(port->priv->hw_version >= MVPP22 && port->gop_id == 0); } /* Port configuration routines */ static bool mvpp2_is_xlg(phy_interface_t interface) { return interface == PHY_INTERFACE_MODE_10GBASER || interface == PHY_INTERFACE_MODE_5GBASER || interface == PHY_INTERFACE_MODE_XAUI; } static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set) { u32 old, val; old = val = readl(ptr); val &= ~mask; val |= set; if (old != val) writel(val, ptr); } static void mvpp22_gop_init_rgmii(struct mvpp2_port *port) { struct mvpp2 *priv = port->priv; u32 val; regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT; regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val); if (port->gop_id == 2) val |= GENCONF_CTRL0_PORT2_RGMII; else if (port->gop_id == 3) val |= GENCONF_CTRL0_PORT3_RGMII_MII; regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val); } static void mvpp22_gop_init_sgmii(struct mvpp2_port *port) { struct mvpp2 *priv = port->priv; u32 val; regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT | GENCONF_PORT_CTRL0_RX_DATA_SAMPLE; regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); if (port->gop_id > 1) { regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val); if (port->gop_id == 2) val &= ~GENCONF_CTRL0_PORT2_RGMII; else if (port->gop_id == 3) val &= ~GENCONF_CTRL0_PORT3_RGMII_MII; regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val); } } static void mvpp22_gop_init_10gkr(struct mvpp2_port *port) { struct 
mvpp2 *priv = port->priv; void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); u32 val; val = readl(xpcs + MVPP22_XPCS_CFG0); val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) | MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3)); val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2); writel(val, xpcs + MVPP22_XPCS_CFG0); val = readl(mpcs + MVPP22_MPCS_CTRL); val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN; writel(val, mpcs + MVPP22_MPCS_CTRL); val = readl(mpcs + MVPP22_MPCS_CLK_RESET); val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7); val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1); writel(val, mpcs + MVPP22_MPCS_CLK_RESET); } static void mvpp22_gop_fca_enable_periodic(struct mvpp2_port *port, bool en) { struct mvpp2 *priv = port->priv; void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id); u32 val; val = readl(fca + MVPP22_FCA_CONTROL_REG); val &= ~MVPP22_FCA_ENABLE_PERIODIC; if (en) val |= MVPP22_FCA_ENABLE_PERIODIC; writel(val, fca + MVPP22_FCA_CONTROL_REG); } static void mvpp22_gop_fca_set_timer(struct mvpp2_port *port, u32 timer) { struct mvpp2 *priv = port->priv; void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id); u32 lsb, msb; lsb = timer & MVPP22_FCA_REG_MASK; msb = timer >> MVPP22_FCA_REG_SIZE; writel(lsb, fca + MVPP22_PERIODIC_COUNTER_LSB_REG); writel(msb, fca + MVPP22_PERIODIC_COUNTER_MSB_REG); } /* Set Flow Control timer x100 faster than pause quanta to ensure that link * partner won't send traffic if port is in XOFF mode. */ static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port) { u32 timer; timer = (port->priv->tclk / (USEC_PER_SEC * FC_CLK_DIVIDER)) * FC_QUANTA; mvpp22_gop_fca_enable_periodic(port, false); mvpp22_gop_fca_set_timer(port, timer); mvpp22_gop_fca_enable_periodic(port, true); } static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface) { struct mvpp2 *priv = port->priv; u32 val; if (!priv->sysctrl_base) return 0; switch (interface) { case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: if (!mvpp2_port_supports_rgmii(port)) goto invalid_conf; mvpp22_gop_init_rgmii(port); break; case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_1000BASEX: case PHY_INTERFACE_MODE_2500BASEX: mvpp22_gop_init_sgmii(port); break; case PHY_INTERFACE_MODE_5GBASER: case PHY_INTERFACE_MODE_10GBASER: if (!mvpp2_port_supports_xlg(port)) goto invalid_conf; mvpp22_gop_init_10gkr(port); break; default: goto unsupported_conf; } regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val); val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) | GENCONF_PORT_CTRL1_EN(port->gop_id); regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val); regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR; regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val); val |= GENCONF_SOFT_RESET1_GOP; regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val); mvpp22_gop_fca_set_periodic_timer(port); unsupported_conf: return 0; invalid_conf: netdev_err(port->dev, "Invalid port configuration\n"); return -EINVAL; } static void mvpp22_gop_unmask_irq(struct mvpp2_port *port) { u32 val; if (phy_interface_mode_is_rgmii(port->phy_interface) || phy_interface_mode_is_8023z(port->phy_interface) || port->phy_interface == PHY_INTERFACE_MODE_SGMII) { /* Enable the GMAC link status irq for this port */ val = readl(port->base + 
MVPP22_GMAC_INT_SUM_MASK); val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); } if (mvpp2_port_supports_xlg(port)) { /* Enable the XLG/GIG irqs for this port */ val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); if (mvpp2_is_xlg(port->phy_interface)) val |= MVPP22_XLG_EXT_INT_MASK_XLG; else val |= MVPP22_XLG_EXT_INT_MASK_GIG; writel(val, port->base + MVPP22_XLG_EXT_INT_MASK); } } static void mvpp22_gop_mask_irq(struct mvpp2_port *port) { u32 val; if (mvpp2_port_supports_xlg(port)) { val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG | MVPP22_XLG_EXT_INT_MASK_GIG); writel(val, port->base + MVPP22_XLG_EXT_INT_MASK); } if (phy_interface_mode_is_rgmii(port->phy_interface) || phy_interface_mode_is_8023z(port->phy_interface) || port->phy_interface == PHY_INTERFACE_MODE_SGMII) { val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); } } static void mvpp22_gop_setup_irq(struct mvpp2_port *port) { u32 val; mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK, MVPP22_GMAC_INT_SUM_MASK_PTP, MVPP22_GMAC_INT_SUM_MASK_PTP); if (port->phylink || phy_interface_mode_is_rgmii(port->phy_interface) || phy_interface_mode_is_8023z(port->phy_interface) || port->phy_interface == PHY_INTERFACE_MODE_SGMII) { val = readl(port->base + MVPP22_GMAC_INT_MASK); val |= MVPP22_GMAC_INT_MASK_LINK_STAT; writel(val, port->base + MVPP22_GMAC_INT_MASK); } if (mvpp2_port_supports_xlg(port)) { val = readl(port->base + MVPP22_XLG_INT_MASK); val |= MVPP22_XLG_INT_MASK_LINK; writel(val, port->base + MVPP22_XLG_INT_MASK); mvpp2_modify(port->base + MVPP22_XLG_EXT_INT_MASK, MVPP22_XLG_EXT_INT_MASK_PTP, MVPP22_XLG_EXT_INT_MASK_PTP); } mvpp22_gop_unmask_irq(port); } /* Sets the PHY mode of the COMPHY (which configures the serdes lanes). * * The PHY mode used by the PPv2 driver comes from the network subsystem, while * the one given to the COMPHY comes from the generic PHY subsystem. Hence they * differ. * * The COMPHY configures the serdes lanes regardless of the actual use of the * lanes by the physical layer. This is why configurations like * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid. 
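*
* Note that phy_set_mode_ext() below is passed the phy_interface_t itself
* as the submode; it is the COMPHY driver that translates it into the
* matching serdes lane configuration.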
*/ static int mvpp22_comphy_init(struct mvpp2_port *port, phy_interface_t interface) { int ret; if (!port->comphy) return 0; ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET, interface); if (ret) return ret; return phy_power_on(port->comphy); } static void mvpp2_port_enable(struct mvpp2_port *port) { u32 val; if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(port->phy_interface)) { val = readl(port->base + MVPP22_XLG_CTRL0_REG); val |= MVPP22_XLG_CTRL0_PORT_EN; val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS; writel(val, port->base + MVPP22_XLG_CTRL0_REG); } else { val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); val |= MVPP2_GMAC_PORT_EN_MASK; val |= MVPP2_GMAC_MIB_CNTR_EN_MASK; writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); } } static void mvpp2_port_disable(struct mvpp2_port *port) { u32 val; if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(port->phy_interface)) { val = readl(port->base + MVPP22_XLG_CTRL0_REG); val &= ~MVPP22_XLG_CTRL0_PORT_EN; writel(val, port->base + MVPP22_XLG_CTRL0_REG); } val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); val &= ~(MVPP2_GMAC_PORT_EN_MASK); writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); } /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */ static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port) { u32 val; val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) & ~MVPP2_GMAC_PERIODIC_XON_EN_MASK; writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); } /* Configure loopback port */ static void mvpp2_port_loopback_set(struct mvpp2_port *port, const struct phylink_link_state *state) { u32 val; val = readl(port->base + MVPP2_GMAC_CTRL_1_REG); if (state->speed == 1000) val |= MVPP2_GMAC_GMII_LB_EN_MASK; else val &= ~MVPP2_GMAC_GMII_LB_EN_MASK; if (phy_interface_mode_is_8023z(state->interface) || state->interface == PHY_INTERFACE_MODE_SGMII) val |= MVPP2_GMAC_PCS_LB_EN_MASK; else val &= ~MVPP2_GMAC_PCS_LB_EN_MASK; writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); } enum { ETHTOOL_XDP_REDIRECT, ETHTOOL_XDP_PASS, ETHTOOL_XDP_DROP, ETHTOOL_XDP_TX, ETHTOOL_XDP_TX_ERR, ETHTOOL_XDP_XMIT, ETHTOOL_XDP_XMIT_ERR, }; struct mvpp2_ethtool_counter { unsigned int offset; const char string[ETH_GSTRING_LEN]; bool reg_is_64b; }; static u64 mvpp2_read_count(struct mvpp2_port *port, const struct mvpp2_ethtool_counter *counter) { u64 val; val = readl(port->stats_base + counter->offset); if (counter->reg_is_64b) val += (u64)readl(port->stats_base + counter->offset + 4) << 32; return val; } /* Some counters are accessed indirectly by first writing an index to * MVPP2_CTRS_IDX. The index can represent various resources depending on the * register we access, it can be a hit counter for some classification tables, * a counter specific to a rxq, a txq or a buffer pool. */ static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg) { mvpp2_write(priv, MVPP2_CTRS_IDX, index); return mvpp2_read(priv, reg); } /* Due to the fact that software statistics and hardware statistics are, by * design, incremented at different moments in the chain of packet processing, * it is very likely that incoming packets could have been dropped after being * counted by hardware but before reaching software statistics (most probably * multicast packets), and in the opposite way, during transmission, FCS bytes * are added in between as well as TSO skb will be split and header bytes added. * Hence, statistics gathered from userspace with ifconfig (software) and * ethtool (hardware) cannot be compared. 
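*
* Note also that the MIB counters below are accumulated into ethtool_stats
* (the += in mvpp2_read_stats()), with a periodic stats_work keeping them
* fresh; the accumulation pattern suggests the hardware counters are
* clear-on-read, so each read only returns the delta since the last one.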
*/ static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = { { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true }, { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" }, { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" }, { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" }, { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" }, { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" }, { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" }, { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" }, { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" }, { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" }, { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" }, { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" }, { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true }, { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" }, { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" }, { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" }, { MVPP2_MIB_FC_SENT, "fc_sent" }, { MVPP2_MIB_FC_RCVD, "fc_received" }, { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" }, { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" }, { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" }, { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" }, { MVPP2_MIB_JABBER_RCVD, "jabber_received" }, { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" }, { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" }, { MVPP2_MIB_COLLISION, "collision" }, { MVPP2_MIB_LATE_COLLISION, "late_collision" }, }; static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = { { MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" }, { MVPP2_CLS_ETH_DROP, "rx_classifier_drops" }, }; static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = { { MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" }, { MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" }, { MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_euqueue_to_ddr" }, { MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" }, { MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" }, { MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" }, { MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" }, { MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" }, { MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" }, }; static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = { { MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" }, { MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" }, { MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" }, { MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" }, }; static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = { { ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", }, { ETHTOOL_XDP_PASS, "rx_xdp_pass", }, { ETHTOOL_XDP_DROP, "rx_xdp_drop", }, { ETHTOOL_XDP_TX, "rx_xdp_tx", }, { ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", }, { ETHTOOL_XDP_XMIT, "tx_xdp_xmit", }, { ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", }, }; #define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs) (ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \ ARRAY_SIZE(mvpp2_ethtool_port_regs) + \ (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \ (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \ ARRAY_SIZE(mvpp2_ethtool_xdp)) static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset, u8 *data) { struct mvpp2_port *port = netdev_priv(netdev); int i, q; if (sset != ETH_SS_STATS) return; for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) { 
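/* Each counter name is copied into its own ETH_GSTRING_LEN slot; the
 * per-queue names further down are instead built with snprintf() so the
 * %d in the template picks up the queue number.
 */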
strscpy(data, mvpp2_ethtool_mib_regs[i].string, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) { strscpy(data, mvpp2_ethtool_port_regs[i].string, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } for (q = 0; q < port->ntxqs; q++) { for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) { snprintf(data, ETH_GSTRING_LEN, mvpp2_ethtool_txq_regs[i].string, q); data += ETH_GSTRING_LEN; } } for (q = 0; q < port->nrxqs; q++) { for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) { snprintf(data, ETH_GSTRING_LEN, mvpp2_ethtool_rxq_regs[i].string, q); data += ETH_GSTRING_LEN; } } for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) { strscpy(data, mvpp2_ethtool_xdp[i].string, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } } static void mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats) { unsigned int start; unsigned int cpu; /* Gather XDP Statistics */ for_each_possible_cpu(cpu) { struct mvpp2_pcpu_stats *cpu_stats; u64 xdp_redirect; u64 xdp_pass; u64 xdp_drop; u64 xdp_xmit; u64 xdp_xmit_err; u64 xdp_tx; u64 xdp_tx_err; cpu_stats = per_cpu_ptr(port->stats, cpu); do { start = u64_stats_fetch_begin(&cpu_stats->syncp); xdp_redirect = cpu_stats->xdp_redirect; xdp_pass = cpu_stats->xdp_pass; xdp_drop = cpu_stats->xdp_drop; xdp_xmit = cpu_stats->xdp_xmit; xdp_xmit_err = cpu_stats->xdp_xmit_err; xdp_tx = cpu_stats->xdp_tx; xdp_tx_err = cpu_stats->xdp_tx_err; } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); xdp_stats->xdp_redirect += xdp_redirect; xdp_stats->xdp_pass += xdp_pass; xdp_stats->xdp_drop += xdp_drop; xdp_stats->xdp_xmit += xdp_xmit; xdp_stats->xdp_xmit_err += xdp_xmit_err; xdp_stats->xdp_tx += xdp_tx; xdp_stats->xdp_tx_err += xdp_tx_err; } } static void mvpp2_read_stats(struct mvpp2_port *port) { struct mvpp2_pcpu_stats xdp_stats = {}; const struct mvpp2_ethtool_counter *s; u64 *pstats; int i, q; pstats = port->ethtool_stats; for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]); for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) *pstats++ += mvpp2_read(port->priv, mvpp2_ethtool_port_regs[i].offset + 4 * port->id); for (q = 0; q < port->ntxqs; q++) for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) *pstats++ += mvpp2_read_index(port->priv, MVPP22_CTRS_TX_CTR(port->id, q), mvpp2_ethtool_txq_regs[i].offset); /* Rxqs are numbered from 0 from the user standpoint, but not from the * driver's. We need to add the port->first_rxq offset. 
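* For example, if this port's first_rxq is 32, the counters for the
* user-visible rxq 0 are read at hardware index 32.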
*/ for (q = 0; q < port->nrxqs; q++) for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) *pstats++ += mvpp2_read_index(port->priv, port->first_rxq + q, mvpp2_ethtool_rxq_regs[i].offset); /* Gather XDP Statistics */ mvpp2_get_xdp_stats(port, &xdp_stats); for (i = 0, s = mvpp2_ethtool_xdp; s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp); s++, i++) { switch (s->offset) { case ETHTOOL_XDP_REDIRECT: *pstats++ = xdp_stats.xdp_redirect; break; case ETHTOOL_XDP_PASS: *pstats++ = xdp_stats.xdp_pass; break; case ETHTOOL_XDP_DROP: *pstats++ = xdp_stats.xdp_drop; break; case ETHTOOL_XDP_TX: *pstats++ = xdp_stats.xdp_tx; break; case ETHTOOL_XDP_TX_ERR: *pstats++ = xdp_stats.xdp_tx_err; break; case ETHTOOL_XDP_XMIT: *pstats++ = xdp_stats.xdp_xmit; break; case ETHTOOL_XDP_XMIT_ERR: *pstats++ = xdp_stats.xdp_xmit_err; break; } } } static void mvpp2_gather_hw_statistics(struct work_struct *work) { struct delayed_work *del_work = to_delayed_work(work); struct mvpp2_port *port = container_of(del_work, struct mvpp2_port, stats_work); mutex_lock(&port->gather_stats_lock); mvpp2_read_stats(port); /* No need to read again the counters right after this function if it * was called asynchronously by the user (ie. use of ethtool). */ cancel_delayed_work(&port->stats_work); queue_delayed_work(port->priv->stats_queue, &port->stats_work, MVPP2_MIB_COUNTERS_STATS_DELAY); mutex_unlock(&port->gather_stats_lock); } static void mvpp2_ethtool_get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct mvpp2_port *port = netdev_priv(dev); /* Update statistics for the given port, then take the lock to avoid * concurrent accesses on the ethtool_stats structure during its copy. */ mvpp2_gather_hw_statistics(&port->stats_work.work); mutex_lock(&port->gather_stats_lock); memcpy(data, port->ethtool_stats, sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs)); mutex_unlock(&port->gather_stats_lock); } static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset) { struct mvpp2_port *port = netdev_priv(dev); if (sset == ETH_SS_STATS) return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs); return -EOPNOTSUPP; } static void mvpp2_mac_reset_assert(struct mvpp2_port *port) { u32 val; val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) | MVPP2_GMAC_PORT_RESET_MASK; writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); if (port->priv->hw_version >= MVPP22 && port->gop_id == 0) { val = readl(port->base + MVPP22_XLG_CTRL0_REG) & ~MVPP22_XLG_CTRL0_MAC_RESET_DIS; writel(val, port->base + MVPP22_XLG_CTRL0_REG); } } static void mvpp22_pcs_reset_assert(struct mvpp2_port *port) { struct mvpp2 *priv = port->priv; void __iomem *mpcs, *xpcs; u32 val; if (port->priv->hw_version == MVPP21 || port->gop_id != 0) return; mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); val = readl(mpcs + MVPP22_MPCS_CLK_RESET); val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX); val |= MVPP22_MPCS_CLK_RESET_DIV_SET; writel(val, mpcs + MVPP22_MPCS_CLK_RESET); val = readl(xpcs + MVPP22_XPCS_CFG0); writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0); } static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port, phy_interface_t interface) { struct mvpp2 *priv = port->priv; void __iomem *mpcs, *xpcs; u32 val; if (port->priv->hw_version == MVPP21 || port->gop_id != 0) return; mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); switch (interface) { case 
PHY_INTERFACE_MODE_5GBASER: case PHY_INTERFACE_MODE_10GBASER: val = readl(mpcs + MVPP22_MPCS_CLK_RESET); val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX; val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET; writel(val, mpcs + MVPP22_MPCS_CLK_RESET); break; case PHY_INTERFACE_MODE_XAUI: case PHY_INTERFACE_MODE_RXAUI: val = readl(xpcs + MVPP22_XPCS_CFG0); writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0); break; default: break; } } /* Change maximum receive size of the port */ static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port) { u32 val; val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK; val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) << MVPP2_GMAC_MAX_RX_SIZE_OFFS); writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); } /* Change maximum receive size of the port */ static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port) { u32 val; val = readl(port->base + MVPP22_XLG_CTRL1_REG); val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK; val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) << MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS; writel(val, port->base + MVPP22_XLG_CTRL1_REG); } /* Set defaults to the MVPP2 port */ static void mvpp2_defaults_set(struct mvpp2_port *port) { int tx_port_num, val, queue, lrxq; if (port->priv->hw_version == MVPP21) { /* Update TX FIFO MIN Threshold */ val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; /* Min. TX threshold must be less than minimal packet length */ val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); } /* Disable Legacy WRR, Disable EJP, Release from reset */ tx_port_num = mvpp2_egress_port(port); mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); /* Set TXQ scheduling to Round-Robin */ mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0); /* Close bandwidth for all queues */ for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0); /* Set refill period to 1 usec, refill tokens * and bucket size to maximum */ mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, port->priv->tclk / USEC_PER_SEC); val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG); val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK; val |= MVPP2_TXP_REFILL_PERIOD_MASK(1); val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK; mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val); val = MVPP2_TXP_TOKEN_SIZE_MAX; mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); /* Set MaximumLowLatencyPacketSize value to 256 */ mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id), MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK | MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); /* Enable Rx cache snoop */ for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { queue = port->rxqs[lrxq]->id; val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); val |= MVPP2_SNOOP_PKT_SIZE_MASK | MVPP2_SNOOP_BUF_HDR_MASK; mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); } /* At default, mask all interrupts to all present cpus */ mvpp2_interrupts_disable(port); } /* Enable/disable receiving packets */ static void mvpp2_ingress_enable(struct mvpp2_port *port) { u32 val; int lrxq, queue; for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { queue = port->rxqs[lrxq]->id; val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); val &= ~MVPP2_RXQ_DISABLE_MASK; mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); } } static void 
mvpp2_ingress_disable(struct mvpp2_port *port) { u32 val; int lrxq, queue; for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { queue = port->rxqs[lrxq]->id; val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); val |= MVPP2_RXQ_DISABLE_MASK; mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); } } /* Enable transmit via physical egress queue * - HW starts take descriptors from DRAM */ static void mvpp2_egress_enable(struct mvpp2_port *port) { u32 qmap; int queue; int tx_port_num = mvpp2_egress_port(port); /* Enable all initialized TXs. */ qmap = 0; for (queue = 0; queue < port->ntxqs; queue++) { struct mvpp2_tx_queue *txq = port->txqs[queue]; if (txq->descs) qmap |= (1 << queue); } mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap); } /* Disable transmit via physical egress queue * - HW doesn't take descriptors from DRAM */ static void mvpp2_egress_disable(struct mvpp2_port *port) { u32 reg_data; int delay; int tx_port_num = mvpp2_egress_port(port); /* Issue stop command for active channels only */ mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & MVPP2_TXP_SCHED_ENQ_MASK; if (reg_data != 0) mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET)); /* Wait for all Tx activity to terminate. */ delay = 0; do { if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) { netdev_warn(port->dev, "Tx stop timed out, status=0x%08x\n", reg_data); break; } mdelay(1); delay++; /* Check port TX Command register that all * Tx queues are stopped */ reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG); } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK); } /* Rx descriptors helper methods */ /* Get number of Rx descriptors occupied by received packets */ static inline int mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id) { u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id)); return val & MVPP2_RXQ_OCCUPIED_MASK; } /* Update Rx queue status with the number of occupied and available * Rx descriptor slots. */ static inline void mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id, int used_count, int free_count) { /* Decrement the number of used descriptors and increment count * increment the number of free descriptors. 
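* Both values are posted with a single register write: used_count sits in
* the low bits and free_count is shifted up by MVPP2_RXQ_NUM_NEW_OFFSET.
* At init time this is used as mvpp2_rxq_status_update(port, rxq->id, 0,
* rxq->size) to publish a fully replenished ring.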
*/ u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET); mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val); } /* Get pointer to next RX descriptor to be processed by SW */ static inline struct mvpp2_rx_desc * mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq) { int rx_desc = rxq->next_desc_to_proc; rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc); prefetch(rxq->descs + rxq->next_desc_to_proc); return rxq->descs + rx_desc; } /* Set rx queue offset */ static void mvpp2_rxq_offset_set(struct mvpp2_port *port, int prxq, int offset) { u32 val; /* Convert offset from bytes to units of 32 bytes */ offset = offset >> 5; val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK; /* Offset is in */ val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & MVPP2_RXQ_PACKET_OFFSET_MASK); mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); } /* Tx descriptors helper methods */ /* Get pointer to next Tx descriptor to be processed (send) by HW */ static struct mvpp2_tx_desc * mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) { int tx_desc = txq->next_desc_to_proc; txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); return txq->descs + tx_desc; } /* Update HW with number of aggregated Tx descriptors to be sent * * Called only from mvpp2_tx(), so migration is disabled, using * smp_processor_id() is OK. */ static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) { /* aggregated access - relevant TXQ number is written in TX desc */ mvpp2_thread_write(port->priv, mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_AGGR_TXQ_UPDATE_REG, pending); } /* Check if there are enough free descriptors in aggregated txq. * If not, update the number of occupied descriptors and repeat the check. * * Called only from mvpp2_tx(), so migration is disabled, using * smp_processor_id() is OK. */ static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port, struct mvpp2_tx_queue *aggr_txq, int num) { if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) { /* Update number of occupied aggregated Tx descriptors */ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); u32 val = mvpp2_read_relaxed(port->priv, MVPP2_AGGR_TXQ_STATUS_REG(thread)); aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) return -ENOMEM; } return 0; } /* Reserved Tx descriptors allocation request * * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called * only by mvpp2_tx(), so migration is disabled, using * smp_processor_id() is OK. */ static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, int num) { unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); struct mvpp2 *priv = port->priv; u32 val; val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val); val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG); return val & MVPP2_TXQ_RSVD_RSLT_MASK; } /* Check if there are enough reserved descriptors for transmission. * If not, request chunk of reserved descriptors and check again. */ static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, struct mvpp2_txq_pcpu *txq_pcpu, int num) { int req, desc_count; unsigned int thread; if (txq_pcpu->reserved_num >= num) return 0; /* Not enough descriptors reserved! Update the reserved descriptor * count and check again. 
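* A new chunk is requested only if the descriptors already used or
* reserved by all threads, plus this request, still leave one
* MVPP2_CPU_DESC_CHUNK of headroom per possible thread in the queue.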
*/ desc_count = 0; /* Compute total of used descriptors */ for (thread = 0; thread < port->priv->nthreads; thread++) { struct mvpp2_txq_pcpu *txq_pcpu_aux; txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread); desc_count += txq_pcpu_aux->count; desc_count += txq_pcpu_aux->reserved_num; } req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num); desc_count += req; if (desc_count > (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK))) return -ENOMEM; txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req); /* OK, the descriptor could have been updated: check again. */ if (txq_pcpu->reserved_num < num) return -ENOMEM; return 0; } /* Release the last allocated Tx descriptor. Useful to handle DMA * mapping failures in the Tx path. */ static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq) { if (txq->next_desc_to_proc == 0) txq->next_desc_to_proc = txq->last_desc - 1; else txq->next_desc_to_proc--; } /* Set Tx descriptors fields relevant for CSUM calculation */ static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto, int ip_hdr_len, int l4_proto) { u32 command; /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, * G_L4_chk, L4_type required only for checksum calculation */ command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT); command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT); command |= MVPP2_TXD_IP_CSUM_DISABLE; if (l3_proto == htons(ETH_P_IP)) { command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ } else { command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */ } if (l4_proto == IPPROTO_TCP) { command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */ command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ } else if (l4_proto == IPPROTO_UDP) { command |= MVPP2_TXD_L4_UDP; /* enable UDP */ command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ } else { command |= MVPP2_TXD_L4_CSUM_NOT; } return command; } /* Get number of sent descriptors and decrement counter. * The number of sent descriptors is returned. * Per-thread access * * Called only from mvpp2_txq_done(), called from mvpp2_tx() * (migration disabled) and from the TX completion tasklet (migration * disabled) so using smp_processor_id() is OK. */ static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { u32 val; /* Reading status reg resets transmitted descriptor counter */ val = mvpp2_thread_read_relaxed(port->priv, mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_TXQ_SENT_REG(txq->id)); return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> MVPP2_TRANSMITTED_COUNT_OFFSET; } /* Called through on_each_cpu(), so runs on all CPUs, with migration * disabled, therefore using smp_processor_id() is OK. 
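* Reading MVPP2_TXQ_SENT_REG is itself what clears the per-queue sent
* counter, so the read below is done purely for its side effect and the
* returned value is discarded.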
*/ static void mvpp2_txq_sent_counter_clear(void *arg) { struct mvpp2_port *port = arg; int queue; /* If the thread isn't used, don't do anything */ if (smp_processor_id() >= port->priv->nthreads) return; for (queue = 0; queue < port->ntxqs; queue++) { int id = port->txqs[queue]->id; mvpp2_thread_read(port->priv, mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_TXQ_SENT_REG(id)); } } /* Set max sizes for Tx queues */ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) { u32 val, size, mtu; int txq, tx_port_num; mtu = port->pkt_size * 8; if (mtu > MVPP2_TXP_MTU_MAX) mtu = MVPP2_TXP_MTU_MAX; /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */ mtu = 3 * mtu; /* Indirect access to registers */ tx_port_num = mvpp2_egress_port(port); mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); /* Set MTU */ val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG); val &= ~MVPP2_TXP_MTU_MAX; val |= mtu; mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val); /* TXP token size and all TXQs token size must be larger that MTU */ val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); size = val & MVPP2_TXP_TOKEN_SIZE_MAX; if (size < mtu) { size = mtu; val &= ~MVPP2_TXP_TOKEN_SIZE_MAX; val |= size; mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); } for (txq = 0; txq < port->ntxqs; txq++) { val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; if (size < mtu) { size = mtu; val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX; val |= size; mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), val); } } } /* Set the number of non-occupied descriptors threshold */ static void mvpp2_set_rxq_free_tresh(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { u32 val; mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); val = mvpp2_read(port->priv, MVPP2_RXQ_THRESH_REG); val &= ~MVPP2_RXQ_NON_OCCUPIED_MASK; val |= MSS_THRESHOLD_STOP << MVPP2_RXQ_NON_OCCUPIED_OFFSET; mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val); } /* Set the number of packets that will be received before Rx interrupt * will be generated by HW. */ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG, rxq->pkts_coal); put_cpu(); } /* For some reason in the LSP this is done on each CPU. Why ? */ static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { unsigned int thread; u32 val; if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK; val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); /* PKT-coalescing registers are per-queue + per-thread */ for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) { mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val); } } static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) { u64 tmp = (u64)clk_hz * usec; do_div(tmp, USEC_PER_SEC); return tmp > U32_MAX ? U32_MAX : tmp; } static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz) { u64 tmp = (u64)cycles * USEC_PER_SEC; do_div(tmp, clk_hz); return tmp > U32_MAX ? 
U32_MAX : tmp; } /* Set the time delay in usec before Rx interrupt */ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { unsigned long freq = port->priv->tclk; u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); if (val > MVPP2_MAX_ISR_RX_THRESHOLD) { rxq->time_coal = mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq); /* re-evaluate to get actual register value */ val = mvpp2_usec_to_cycles(rxq->time_coal, freq); } mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); } static void mvpp2_tx_time_coal_set(struct mvpp2_port *port) { unsigned long freq = port->priv->tclk; u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); if (val > MVPP2_MAX_ISR_TX_THRESHOLD) { port->tx_time_coal = mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq); /* re-evaluate to get actual register value */ val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); } mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val); } /* Free Tx queue skbuffs */ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, struct mvpp2_txq_pcpu *txq_pcpu, int num) { struct xdp_frame_bulk bq; int i; xdp_frame_bulk_init(&bq); rcu_read_lock(); /* need for xdp_return_frame_bulk */ for (i = 0; i < num; i++) { struct mvpp2_txq_pcpu_buf *tx_buf = txq_pcpu->buffs + txq_pcpu->txq_get_index; if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) && tx_buf->type != MVPP2_TYPE_XDP_TX) dma_unmap_single(port->dev->dev.parent, tx_buf->dma, tx_buf->size, DMA_TO_DEVICE); if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb) dev_kfree_skb_any(tx_buf->skb); else if (tx_buf->type == MVPP2_TYPE_XDP_TX || tx_buf->type == MVPP2_TYPE_XDP_NDO) xdp_return_frame_bulk(tx_buf->xdpf, &bq); mvpp2_txq_inc_get(txq_pcpu); } xdp_flush_frame_bulk(&bq); rcu_read_unlock(); } static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, u32 cause) { int queue = fls(cause) - 1; return port->rxqs[queue]; } static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, u32 cause) { int queue = fls(cause) - 1; return port->txqs[queue]; } /* Handle end of transmission */ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, struct mvpp2_txq_pcpu *txq_pcpu) { struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); int tx_done; if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id())) netdev_err(port->dev, "wrong cpu on the end of Tx processing\n"); tx_done = mvpp2_txq_sent_desc_proc(port, txq); if (!tx_done) return; mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done); txq_pcpu->count -= tx_done; if (netif_tx_queue_stopped(nq)) if (txq_pcpu->count <= txq_pcpu->wake_threshold) netif_tx_wake_queue(nq); } static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, unsigned int thread) { struct mvpp2_tx_queue *txq; struct mvpp2_txq_pcpu *txq_pcpu; unsigned int tx_todo = 0; while (cause) { txq = mvpp2_get_tx_queue(port, cause); if (!txq) break; txq_pcpu = per_cpu_ptr(txq->pcpu, thread); if (txq_pcpu->count) { mvpp2_txq_done(port, txq, txq_pcpu); tx_todo += txq_pcpu->count; } cause &= ~(1 << txq->log_id); } return tx_todo; } /* Rx/Tx queue initialization/cleanup methods */ /* Allocate and initialize descriptors for aggr TXQ */ static int mvpp2_aggr_txq_init(struct platform_device *pdev, struct mvpp2_tx_queue *aggr_txq, unsigned int thread, struct mvpp2 *priv) { u32 txq_dma; /* Allocate memory for TX descriptors */ aggr_txq->descs = dma_alloc_coherent(&pdev->dev, MVPP2_AGGR_TXQ_SIZE * 
MVPP2_DESC_ALIGNED_SIZE, &aggr_txq->descs_dma, GFP_KERNEL); if (!aggr_txq->descs) return -ENOMEM; aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1; /* Aggr TXQ no reset WA */ aggr_txq->next_desc_to_proc = mvpp2_read(priv, MVPP2_AGGR_TXQ_INDEX_REG(thread)); /* Set Tx descriptors queue starting address indirect * access */ if (priv->hw_version == MVPP21) txq_dma = aggr_txq->descs_dma; else txq_dma = aggr_txq->descs_dma >> MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma); mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread), MVPP2_AGGR_TXQ_SIZE); return 0; } /* Create a specified Rx queue */ static int mvpp2_rxq_init(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { struct mvpp2 *priv = port->priv; unsigned int thread; u32 rxq_dma; int err; rxq->size = port->rx_ring_size; /* Allocate memory for RX descriptors */ rxq->descs = dma_alloc_coherent(port->dev->dev.parent, rxq->size * MVPP2_DESC_ALIGNED_SIZE, &rxq->descs_dma, GFP_KERNEL); if (!rxq->descs) return -ENOMEM; rxq->last_desc = rxq->size - 1; /* Zero occupied and non-occupied counters - direct access */ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); /* Set Rx descriptors queue starting address - indirect access */ thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); if (port->priv->hw_version == MVPP21) rxq_dma = rxq->descs_dma; else rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0); put_cpu(); /* Set Offset */ mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM); /* Set coalescing pkts and time */ mvpp2_rx_pkts_coal_set(port, rxq); mvpp2_rx_time_coal_set(port, rxq); /* Set the number of non occupied descriptors threshold */ mvpp2_set_rxq_free_tresh(port, rxq); /* Add number of descriptors ready for receiving packets */ mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); if (priv->percpu_pools) { err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0); if (err < 0) goto err_free_dma; err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0); if (err < 0) goto err_unregister_rxq_short; /* Every RXQ has a pool for short and another for long packets */ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short, MEM_TYPE_PAGE_POOL, priv->page_pool[rxq->logic_rxq]); if (err < 0) goto err_unregister_rxq_long; err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long, MEM_TYPE_PAGE_POOL, priv->page_pool[rxq->logic_rxq + port->nrxqs]); if (err < 0) goto err_unregister_mem_rxq_short; } return 0; err_unregister_mem_rxq_short: xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short); err_unregister_rxq_long: xdp_rxq_info_unreg(&rxq->xdp_rxq_long); err_unregister_rxq_short: xdp_rxq_info_unreg(&rxq->xdp_rxq_short); err_free_dma: dma_free_coherent(port->dev->dev.parent, rxq->size * MVPP2_DESC_ALIGNED_SIZE, rxq->descs, rxq->descs_dma); return err; } /* Push packets received by the RXQ to BM pool */ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { int rx_received, i; rx_received = mvpp2_rxq_received(port, rxq->id); if (!rx_received) return; for (i = 0; i < rx_received; i++) { struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); u32 status = mvpp2_rxdesc_status_get(port, rx_desc); int pool; pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >> 
MVPP2_RXD_BM_POOL_ID_OFFS; mvpp2_bm_pool_put(port, pool, mvpp2_rxdesc_dma_addr_get(port, rx_desc), mvpp2_rxdesc_cookie_get(port, rx_desc)); } mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); } /* Cleanup Rx queue */ static void mvpp2_rxq_deinit(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { unsigned int thread; if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short)) xdp_rxq_info_unreg(&rxq->xdp_rxq_short); if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long)) xdp_rxq_info_unreg(&rxq->xdp_rxq_long); mvpp2_rxq_drop_pkts(port, rxq); if (rxq->descs) dma_free_coherent(port->dev->dev.parent, rxq->size * MVPP2_DESC_ALIGNED_SIZE, rxq->descs, rxq->descs_dma); rxq->descs = NULL; rxq->last_desc = 0; rxq->next_desc_to_proc = 0; rxq->descs_dma = 0; /* Clear Rx descriptors queue starting address and size; * free descriptor number */ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0); mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0); put_cpu(); } /* Create and initialize a Tx queue */ static int mvpp2_txq_init(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { u32 val; unsigned int thread; int desc, desc_per_txq, tx_port_num; struct mvpp2_txq_pcpu *txq_pcpu; txq->size = port->tx_ring_size; /* Allocate memory for Tx descriptors */ txq->descs = dma_alloc_coherent(port->dev->dev.parent, txq->size * MVPP2_DESC_ALIGNED_SIZE, &txq->descs_dma, GFP_KERNEL); if (!txq->descs) return -ENOMEM; txq->last_desc = txq->size - 1; /* Set Tx descriptors queue starting address - indirect access */ thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma); mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, txq->size & MVPP2_TXQ_DESC_SIZE_MASK); mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0); mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG, txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); val &= ~MVPP2_TXQ_PENDING_MASK; mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val); /* Calculate base address in prefetch buffer. We reserve 16 descriptors * for each existing TXQ. 
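* For example, assuming MVPP2_MAX_TXQ is 8, port 1 / logical txq 2 gets
* desc = 1 * 8 * 16 + 2 * 16 = 160 as its base in the prefetch buffer.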
* TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS */ desc_per_txq = 16; desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + (txq->log_id * desc_per_txq); mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); put_cpu(); /* WRR / EJP configuration - indirect access */ tx_port_num = mvpp2_egress_port(port); mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); val = MVPP2_TXQ_TOKEN_SIZE_MAX; mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), val); for (thread = 0; thread < port->priv->nthreads; thread++) { txq_pcpu = per_cpu_ptr(txq->pcpu, thread); txq_pcpu->size = txq->size; txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, sizeof(*txq_pcpu->buffs), GFP_KERNEL); if (!txq_pcpu->buffs) return -ENOMEM; txq_pcpu->count = 0; txq_pcpu->reserved_num = 0; txq_pcpu->txq_put_index = 0; txq_pcpu->txq_get_index = 0; txq_pcpu->tso_headers = NULL; txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS; txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2; txq_pcpu->tso_headers = dma_alloc_coherent(port->dev->dev.parent, txq_pcpu->size * TSO_HEADER_SIZE, &txq_pcpu->tso_headers_dma, GFP_KERNEL); if (!txq_pcpu->tso_headers) return -ENOMEM; } return 0; } /* Free allocated TXQ resources */ static void mvpp2_txq_deinit(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { struct mvpp2_txq_pcpu *txq_pcpu; unsigned int thread; for (thread = 0; thread < port->priv->nthreads; thread++) { txq_pcpu = per_cpu_ptr(txq->pcpu, thread); kfree(txq_pcpu->buffs); if (txq_pcpu->tso_headers) dma_free_coherent(port->dev->dev.parent, txq_pcpu->size * TSO_HEADER_SIZE, txq_pcpu->tso_headers, txq_pcpu->tso_headers_dma); txq_pcpu->tso_headers = NULL; } if (txq->descs) dma_free_coherent(port->dev->dev.parent, txq->size * MVPP2_DESC_ALIGNED_SIZE, txq->descs, txq->descs_dma); txq->descs = NULL; txq->last_desc = 0; txq->next_desc_to_proc = 0; txq->descs_dma = 0; /* Set minimum bandwidth for disabled TXQs */ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0); /* Set Tx descriptors queue starting address and size */ thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0); mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0); put_cpu(); } /* Cleanup Tx ports */ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { struct mvpp2_txq_pcpu *txq_pcpu; int delay, pending; unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); u32 val; mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG); val |= MVPP2_TXQ_DRAIN_EN_MASK; mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); /* The napi queue has been stopped so wait for all packets * to be transmitted. 
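* The drain bit set just above lets the hardware flush whatever is still
* queued; the loop below polls the pending counter, giving up after
* MVPP2_TX_PENDING_TIMEOUT_MSEC milliseconds.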
*/ delay = 0; do { if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { netdev_warn(port->dev, "port %d: cleaning queue %d timed out\n", port->id, txq->log_id); break; } mdelay(1); delay++; pending = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); pending &= MVPP2_TXQ_PENDING_MASK; } while (pending); val &= ~MVPP2_TXQ_DRAIN_EN_MASK; mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); put_cpu(); for (thread = 0; thread < port->priv->nthreads; thread++) { txq_pcpu = per_cpu_ptr(txq->pcpu, thread); /* Release all packets */ mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); /* Reset queue */ txq_pcpu->count = 0; txq_pcpu->txq_put_index = 0; txq_pcpu->txq_get_index = 0; } } /* Cleanup all Tx queues */ static void mvpp2_cleanup_txqs(struct mvpp2_port *port) { struct mvpp2_tx_queue *txq; int queue; u32 val; val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); /* Reset Tx ports and delete Tx queues */ val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); for (queue = 0; queue < port->ntxqs; queue++) { txq = port->txqs[queue]; mvpp2_txq_clean(port, txq); mvpp2_txq_deinit(port, txq); } on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); } /* Cleanup all Rx queues */ static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) { int queue; for (queue = 0; queue < port->nrxqs; queue++) mvpp2_rxq_deinit(port, port->rxqs[queue]); if (port->tx_fc) mvpp2_rxq_disable_fc(port); } /* Init all Rx queues for port */ static int mvpp2_setup_rxqs(struct mvpp2_port *port) { int queue, err; for (queue = 0; queue < port->nrxqs; queue++) { err = mvpp2_rxq_init(port, port->rxqs[queue]); if (err) goto err_cleanup; } if (port->tx_fc) mvpp2_rxq_enable_fc(port); return 0; err_cleanup: mvpp2_cleanup_rxqs(port); return err; } /* Init all tx queues for port */ static int mvpp2_setup_txqs(struct mvpp2_port *port) { struct mvpp2_tx_queue *txq; int queue, err; for (queue = 0; queue < port->ntxqs; queue++) { txq = port->txqs[queue]; err = mvpp2_txq_init(port, txq); if (err) goto err_cleanup; /* Assign this queue to a CPU */ if (queue < num_possible_cpus()) netif_set_xps_queue(port->dev, cpumask_of(queue), queue); } if (port->has_tx_irqs) { mvpp2_tx_time_coal_set(port); for (queue = 0; queue < port->ntxqs; queue++) { txq = port->txqs[queue]; mvpp2_tx_pkts_coal_set(port, txq); } } on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); return 0; err_cleanup: mvpp2_cleanup_txqs(port); return err; } /* The callback for per-port interrupt */ static irqreturn_t mvpp2_isr(int irq, void *dev_id) { struct mvpp2_queue_vector *qv = dev_id; mvpp2_qvec_interrupt_disable(qv); napi_schedule(&qv->napi); return IRQ_HANDLED; } static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq) { struct skb_shared_hwtstamps shhwtstamps; struct mvpp2_hwtstamp_queue *queue; struct sk_buff *skb; void __iomem *ptp_q; unsigned int id; u32 r0, r1, r2; ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); if (nq) ptp_q += MVPP22_PTP_TX_Q1_R0 - MVPP22_PTP_TX_Q0_R0; queue = &port->tx_hwtstamp_queue[nq]; while (1) { r0 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R0) & 0xffff; if (!r0) break; r1 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R1) & 0xffff; r2 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R2) & 0xffff; id = (r0 >> 1) & 31; skb = queue->skb[id]; queue->skb[id] = NULL; if (skb) { u32 ts = r2 << 19 | r1 << 3 | r0 >> 13; mvpp22_tai_tstamp(port->priv->tai, ts, &shhwtstamps); 
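/* The 32-bit hardware timestamp is spread across three 16-bit
 * registers: bits 2:0 come from r0[15:13], bits 18:3 from r1 and
 * bits 31:19 from r2[12:0]. mvpp22_tai_tstamp() reconstructs a full
 * timestamp from it with the help of the TAI clock.
 */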
skb_tstamp_tx(skb, &shhwtstamps); dev_kfree_skb_any(skb); } } } static void mvpp2_isr_handle_ptp(struct mvpp2_port *port) { void __iomem *ptp; u32 val; ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); val = readl(ptp + MVPP22_PTP_INT_CAUSE); if (val & MVPP22_PTP_INT_CAUSE_QUEUE0) mvpp2_isr_handle_ptp_queue(port, 0); if (val & MVPP22_PTP_INT_CAUSE_QUEUE1) mvpp2_isr_handle_ptp_queue(port, 1); } static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link) { struct net_device *dev = port->dev; if (port->phylink) { phylink_mac_change(port->phylink, link); return; } if (!netif_running(dev)) return; if (link) { mvpp2_interrupts_enable(port); mvpp2_egress_enable(port); mvpp2_ingress_enable(port); netif_carrier_on(dev); netif_tx_wake_all_queues(dev); } else { netif_tx_stop_all_queues(dev); netif_carrier_off(dev); mvpp2_ingress_disable(port); mvpp2_egress_disable(port); mvpp2_interrupts_disable(port); } } static void mvpp2_isr_handle_xlg(struct mvpp2_port *port) { bool link; u32 val; val = readl(port->base + MVPP22_XLG_INT_STAT); if (val & MVPP22_XLG_INT_STAT_LINK) { val = readl(port->base + MVPP22_XLG_STATUS); link = (val & MVPP22_XLG_STATUS_LINK_UP); mvpp2_isr_handle_link(port, link); } } static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port) { bool link; u32 val; if (phy_interface_mode_is_rgmii(port->phy_interface) || phy_interface_mode_is_8023z(port->phy_interface) || port->phy_interface == PHY_INTERFACE_MODE_SGMII) { val = readl(port->base + MVPP22_GMAC_INT_STAT); if (val & MVPP22_GMAC_INT_STAT_LINK) { val = readl(port->base + MVPP2_GMAC_STATUS0); link = (val & MVPP2_GMAC_STATUS0_LINK_UP); mvpp2_isr_handle_link(port, link); } } } /* Per-port interrupt for link status changes */ static irqreturn_t mvpp2_port_isr(int irq, void *dev_id) { struct mvpp2_port *port = (struct mvpp2_port *)dev_id; u32 val; mvpp22_gop_mask_irq(port); if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(port->phy_interface)) { /* Check the external status register */ val = readl(port->base + MVPP22_XLG_EXT_INT_STAT); if (val & MVPP22_XLG_EXT_INT_STAT_XLG) mvpp2_isr_handle_xlg(port); if (val & MVPP22_XLG_EXT_INT_STAT_PTP) mvpp2_isr_handle_ptp(port); } else { /* If it's not the XLG, we must be using the GMAC. * Check the summary status. 
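* The summary status register aggregates both the internal GMAC link
* interrupt and the PTP interrupt, so a single read covers both checks.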
*/ val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT); if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL) mvpp2_isr_handle_gmac_internal(port); if (val & MVPP22_GMAC_INT_SUM_STAT_PTP) mvpp2_isr_handle_ptp(port); } mvpp22_gop_unmask_irq(port); return IRQ_HANDLED; } static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) { struct net_device *dev; struct mvpp2_port *port; struct mvpp2_port_pcpu *port_pcpu; unsigned int tx_todo, cause; port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer); dev = port_pcpu->dev; if (!netif_running(dev)) return HRTIMER_NORESTART; port_pcpu->timer_scheduled = false; port = netdev_priv(dev); /* Process all the Tx queues */ cause = (1 << port->ntxqs) - 1; tx_todo = mvpp2_tx_done(port, cause, mvpp2_cpu_to_thread(port->priv, smp_processor_id())); /* Set the timer in case not all the packets were processed */ if (tx_todo && !port_pcpu->timer_scheduled) { port_pcpu->timer_scheduled = true; hrtimer_forward_now(&port_pcpu->tx_done_timer, MVPP2_TXDONE_HRTIMER_PERIOD_NS); return HRTIMER_RESTART; } return HRTIMER_NORESTART; } /* Main RX/TX processing routines */ /* Display more error info */ static void mvpp2_rx_error(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { u32 status = mvpp2_rxdesc_status_get(port, rx_desc); size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); char *err_str = NULL; switch (status & MVPP2_RXD_ERR_CODE_MASK) { case MVPP2_RXD_ERR_CRC: err_str = "crc"; break; case MVPP2_RXD_ERR_OVERRUN: err_str = "overrun"; break; case MVPP2_RXD_ERR_RESOURCE: err_str = "resource"; break; } if (err_str && net_ratelimit()) netdev_err(port->dev, "bad rx status %08x (%s error), size=%zu\n", status, err_str, sz); } /* Handle RX checksum offload */ static int mvpp2_rx_csum(struct mvpp2_port *port, u32 status) { if (((status & MVPP2_RXD_L3_IP4) && !(status & MVPP2_RXD_IP4_HEADER_ERR)) || (status & MVPP2_RXD_L3_IP6)) if (((status & MVPP2_RXD_L4_UDP) || (status & MVPP2_RXD_L4_TCP)) && (status & MVPP2_RXD_L4_CSUM_OK)) return CHECKSUM_UNNECESSARY; return CHECKSUM_NONE; } /* Allocate a new skb and add it to BM pool */ static int mvpp2_rx_refill(struct mvpp2_port *port, struct mvpp2_bm_pool *bm_pool, struct page_pool *page_pool, int pool) { dma_addr_t dma_addr; phys_addr_t phys_addr; void *buf; buf = mvpp2_buf_alloc(port, bm_pool, page_pool, &dma_addr, &phys_addr, GFP_ATOMIC); if (!buf) return -ENOMEM; mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); return 0; } /* Handle tx checksum */ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) { if (skb->ip_summed == CHECKSUM_PARTIAL) { int ip_hdr_len = 0; u8 l4_proto; __be16 l3_proto = vlan_get_protocol(skb); if (l3_proto == htons(ETH_P_IP)) { struct iphdr *ip4h = ip_hdr(skb); /* Calculate IPv4 checksum and L4 checksum */ ip_hdr_len = ip4h->ihl; l4_proto = ip4h->protocol; } else if (l3_proto == htons(ETH_P_IPV6)) { struct ipv6hdr *ip6h = ipv6_hdr(skb); /* Read l4_protocol from one of IPv6 extra headers */ if (skb_network_header_len(skb) > 0) ip_hdr_len = (skb_network_header_len(skb) >> 2); l4_proto = ip6h->nexthdr; } else { return MVPP2_TXD_L4_CSUM_NOT; } return mvpp2_txq_desc_csum(skb_network_offset(skb), l3_proto, ip_hdr_len, l4_proto); } return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; } static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte) { unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); struct mvpp2_tx_queue *aggr_txq; struct mvpp2_txq_pcpu *txq_pcpu; struct mvpp2_tx_queue *txq; struct netdev_queue 
*nq; txq = port->txqs[txq_id]; txq_pcpu = per_cpu_ptr(txq->pcpu, thread); nq = netdev_get_tx_queue(port->dev, txq_id); aggr_txq = &port->priv->aggr_txqs[thread]; txq_pcpu->reserved_num -= nxmit; txq_pcpu->count += nxmit; aggr_txq->count += nxmit; /* Enable transmit */ wmb(); mvpp2_aggr_txq_pend_desc_add(port, nxmit); if (txq_pcpu->count >= txq_pcpu->stop_threshold) netif_tx_stop_queue(nq); /* Finalize TX processing */ if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) mvpp2_txq_done(port, txq, txq_pcpu); } static int mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id, struct xdp_frame *xdpf, bool dma_map) { unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; enum mvpp2_tx_buf_type buf_type; struct mvpp2_txq_pcpu *txq_pcpu; struct mvpp2_tx_queue *aggr_txq; struct mvpp2_tx_desc *tx_desc; struct mvpp2_tx_queue *txq; int ret = MVPP2_XDP_TX; dma_addr_t dma_addr; txq = port->txqs[txq_id]; txq_pcpu = per_cpu_ptr(txq->pcpu, thread); aggr_txq = &port->priv->aggr_txqs[thread]; /* Check number of available descriptors */ if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) || mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) { ret = MVPP2_XDP_DROPPED; goto out; } /* Get a descriptor for the first part of the packet */ tx_desc = mvpp2_txq_next_desc_get(aggr_txq); mvpp2_txdesc_txq_set(port, tx_desc, txq->id); mvpp2_txdesc_size_set(port, tx_desc, xdpf->len); if (dma_map) { /* XDP_REDIRECT or AF_XDP */ dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data, xdpf->len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { mvpp2_txq_desc_put(txq); ret = MVPP2_XDP_DROPPED; goto out; } buf_type = MVPP2_TYPE_XDP_NDO; } else { /* XDP_TX */ struct page *page = virt_to_page(xdpf->data); dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) + xdpf->headroom; dma_sync_single_for_device(port->dev->dev.parent, dma_addr, xdpf->len, DMA_BIDIRECTIONAL); buf_type = MVPP2_TYPE_XDP_TX; } mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr); mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type); out: return ret; } static int mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp) { struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); struct xdp_frame *xdpf; u16 txq_id; int ret; xdpf = xdp_convert_buff_to_frame(xdp); if (unlikely(!xdpf)) return MVPP2_XDP_DROPPED; /* The first of the TX queues are used for XPS, * the second half for XDP_TX */ txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2); ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false); if (ret == MVPP2_XDP_TX) { u64_stats_update_begin(&stats->syncp); stats->tx_bytes += xdpf->len; stats->tx_packets++; stats->xdp_tx++; u64_stats_update_end(&stats->syncp); mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len); } else { u64_stats_update_begin(&stats->syncp); stats->xdp_tx_err++; u64_stats_update_end(&stats->syncp); } return ret; } static int mvpp2_xdp_xmit(struct net_device *dev, int num_frame, struct xdp_frame **frames, u32 flags) { struct mvpp2_port *port = netdev_priv(dev); int i, nxmit_byte = 0, nxmit = 0; struct mvpp2_pcpu_stats *stats; u16 txq_id; u32 ret; if (unlikely(test_bit(0, &port->state))) return -ENETDOWN; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; /* The first of the TX queues are used for XPS, * the second half for XDP_TX */ txq_id = 
mvpp2_cpu_to_thread(port->priv, smp_processor_id()) +
		 (port->ntxqs / 2);

	for (i = 0; i < num_frame; i++) {
		ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true);
		if (ret != MVPP2_XDP_TX)
			break;

		nxmit_byte += frames[i]->len;
		nxmit++;
	}

	if (likely(nxmit > 0))
		mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte);

	stats = this_cpu_ptr(port->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_bytes += nxmit_byte;
	stats->tx_packets += nxmit;
	stats->xdp_xmit += nxmit;
	stats->xdp_xmit_err += num_frame - nxmit;
	u64_stats_update_end(&stats->syncp);

	return nxmit;
}

static int mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog,
			 struct xdp_buff *xdp, struct page_pool *pp,
			 struct mvpp2_pcpu_stats *stats)
{
	unsigned int len, sync, err;
	struct page *page;
	u32 ret, act;

	len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
	act = bpf_prog_run_xdp(prog, xdp);

	/* The program may have moved xdp->data_end via xdp_adjust_tail(), so
	 * the DMA sync towards the device must cover the maximum length the
	 * CPU may have touched.
	 */
	sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
	sync = max(sync, len);

	switch (act) {
	case XDP_PASS:
		stats->xdp_pass++;
		ret = MVPP2_XDP_PASS;
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(port->dev, xdp, prog);
		if (unlikely(err)) {
			ret = MVPP2_XDP_DROPPED;
			page = virt_to_head_page(xdp->data);
			page_pool_put_page(pp, page, sync, true);
		} else {
			ret = MVPP2_XDP_REDIR;
			stats->xdp_redirect++;
		}
		break;
	case XDP_TX:
		ret = mvpp2_xdp_xmit_back(port, xdp);
		if (ret != MVPP2_XDP_TX) {
			page = virt_to_head_page(xdp->data);
			page_pool_put_page(pp, page, sync, true);
		}
		break;
	default:
		bpf_warn_invalid_xdp_action(port->dev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(port->dev, prog, act);
		fallthrough;
	case XDP_DROP:
		page = virt_to_head_page(xdp->data);
		page_pool_put_page(pp, page, sync, true);
		ret = MVPP2_XDP_DROPPED;
		stats->xdp_drop++;
		break;
	}

	return ret;
}

static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc,
				    int pool, u32 rx_status)
{
	phys_addr_t phys_addr, phys_addr_next;
	dma_addr_t dma_addr, dma_addr_next;
	struct mvpp2_buff_hdr *buff_hdr;

	phys_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
	dma_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);

	do {
		buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr);

		phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr);
		dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr);

		if (port->priv->hw_version >= MVPP22) {
			phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32);
			dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32);
		}

		mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);

		phys_addr = phys_addr_next;
		dma_addr = dma_addr_next;

	} while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info)));
}

/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
		    int rx_todo, struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	struct mvpp2_pcpu_stats ps = {};
	enum dma_data_direction dma_dir;
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	int rx_received;
	int rx_done = 0;
	u32 xdp_ret = 0;

	xdp_prog = READ_ONCE(port->xdp_prog);

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	while (rx_done < rx_todo) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct page_pool *pp = NULL;
		struct sk_buff *skb;
		unsigned int frag_size;
		dma_addr_t dma_addr;
		phys_addr_t phys_addr;
		u32 rx_status, timestamp;
		int pool, rx_bytes, err, ret;
		struct page *page;
		void *data;

		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
		data = (void *)phys_to_virt(phys_addr);
		page = virt_to_page(data);
		prefetch(page);

		rx_done++;
		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
		rx_bytes -= MVPP2_MH_SIZE;
		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);

		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;
		bm_pool = &port->priv->bm_pools[pool];

		if (port->priv->percpu_pools) {
			pp = port->priv->page_pool[pool];
			dma_dir = page_pool_get_dma_dir(pp);
		} else {
			dma_dir = DMA_FROM_DEVICE;
		}

		dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
					rx_bytes + MVPP2_MH_SIZE,
					dma_dir);

		/* Buffer header not supported */
		if (rx_status & MVPP2_RXD_BUF_HDR)
			goto err_drop_frame;

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This release request is handled by
		 * the hardware, and the information about the buffer is
		 * carried in the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY)
			goto err_drop_frame;

		/* Prefetch header */
		prefetch(data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);

		if (bm_pool->frag_size > PAGE_SIZE)
			frag_size = 0;
		else
			frag_size = bm_pool->frag_size;

		if (xdp_prog) {
			struct xdp_rxq_info *xdp_rxq;

			if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
				xdp_rxq = &rxq->xdp_rxq_short;
			else
				xdp_rxq = &rxq->xdp_rxq_long;

			xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
			xdp_prepare_buff(&xdp, data,
					 MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
					 rx_bytes, false);

			ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps);

			if (ret) {
				xdp_ret |= ret;
				err = mvpp2_rx_refill(port, bm_pool, pp, pool);
				if (err) {
					netdev_err(port->dev, "failed to refill BM pools\n");
					goto err_drop_frame;
				}

				ps.rx_packets++;
				ps.rx_bytes += rx_bytes;
				continue;
			}
		}

		skb = build_skb(data, frag_size);
		if (!skb) {
			netdev_warn(port->dev, "skb build failed\n");
			goto err_drop_frame;
		}

		/* If we have RX hardware timestamping enabled, grab the
		 * timestamp from the queue and convert.
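		 *
		 * The descriptor carries only a 32-bit timestamp value, so
		 * mvpp22_tai_tstamp() is presumably responsible for extending
		 * it against the TAI clock into the full hardware timestamp
		 * stored in the skb.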
*/ if (mvpp22_rx_hwtstamping(port)) { timestamp = le32_to_cpu(rx_desc->pp22.timestamp); mvpp22_tai_tstamp(port->priv->tai, timestamp, skb_hwtstamps(skb)); } err = mvpp2_rx_refill(port, bm_pool, pp, pool); if (err) { netdev_err(port->dev, "failed to refill BM pools\n"); dev_kfree_skb_any(skb); goto err_drop_frame; } if (pp) skb_mark_for_recycle(skb); else dma_unmap_single_attrs(dev->dev.parent, dma_addr, bm_pool->buf_size, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); ps.rx_packets++; ps.rx_bytes += rx_bytes; skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM); skb_put(skb, rx_bytes); skb->ip_summed = mvpp2_rx_csum(port, rx_status); skb->protocol = eth_type_trans(skb, dev); napi_gro_receive(napi, skb); continue; err_drop_frame: dev->stats.rx_errors++; mvpp2_rx_error(port, rx_desc); /* Return the buffer to the pool */ if (rx_status & MVPP2_RXD_BUF_HDR) mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status); else mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); } if (xdp_ret & MVPP2_XDP_REDIR) xdp_do_flush_map(); if (ps.rx_packets) { struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); u64_stats_update_begin(&stats->syncp); stats->rx_packets += ps.rx_packets; stats->rx_bytes += ps.rx_bytes; /* xdp */ stats->xdp_redirect += ps.xdp_redirect; stats->xdp_pass += ps.xdp_pass; stats->xdp_drop += ps.xdp_drop; u64_stats_update_end(&stats->syncp); } /* Update Rx queue management counters */ wmb(); mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done); return rx_todo; } static inline void tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, struct mvpp2_tx_desc *desc) { unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); dma_addr_t buf_dma_addr = mvpp2_txdesc_dma_addr_get(port, desc); size_t buf_sz = mvpp2_txdesc_size_get(port, desc); if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr)) dma_unmap_single(port->dev->dev.parent, buf_dma_addr, buf_sz, DMA_TO_DEVICE); mvpp2_txq_desc_put(txq); } static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port, struct mvpp2_tx_desc *desc) { /* We only need to clear the low bits */ if (port->priv->hw_version >= MVPP22) desc->pp22.ptp_descriptor &= cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW); } static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port, struct mvpp2_tx_desc *tx_desc, struct sk_buff *skb) { struct mvpp2_hwtstamp_queue *queue; unsigned int mtype, type, i; struct ptp_header *hdr; u64 ptpdesc; if (port->priv->hw_version == MVPP21 || port->tx_hwtstamp_type == HWTSTAMP_TX_OFF) return false; type = ptp_classify_raw(skb); if (!type) return false; hdr = ptp_parse_header(skb, type); if (!hdr) return false; skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN | MVPP22_PTP_ACTION_CAPTURE; queue = &port->tx_hwtstamp_queue[0]; switch (type & PTP_CLASS_VMASK) { case PTP_CLASS_V1: ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV1); break; case PTP_CLASS_V2: ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV2); mtype = hdr->tsmt & 15; /* Direct PTP Sync messages to queue 1 */ if (mtype == 0) { ptpdesc |= MVPP22_PTP_TIMESTAMPQUEUESELECT; queue = &port->tx_hwtstamp_queue[1]; } break; } /* Take a reference on the skb and insert into our queue */ i = queue->next; queue->next = (i + 1) & 31; if (queue->skb[i]) dev_kfree_skb_any(queue->skb[i]); queue->skb[i] = skb_get(skb); ptpdesc |= MVPP22_PTP_TIMESTAMPENTRYID(i); /* * 3:0 - PTPAction * 6:4 - PTPPacketFormat * 7 - PTP_CF_WraparoundCheckEn * 9:8 - 
IngressTimestampSeconds[1:0] * 10 - Reserved * 11 - MACTimestampingEn * 17:12 - PTP_TimestampQueueEntryID[5:0] * 18 - PTPTimestampQueueSelect * 19 - UDPChecksumUpdateEn * 27:20 - TimestampOffset * PTP, NTPTransmit, OWAMP/TWAMP - L3 to PTP header * NTPTs, Y.1731 - L3 to timestamp entry * 35:28 - UDP Checksum Offset * * stored in tx descriptor bits 75:64 (11:0) and 191:168 (35:12) */ tx_desc->pp22.ptp_descriptor &= cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW); tx_desc->pp22.ptp_descriptor |= cpu_to_le32(ptpdesc & MVPP22_PTP_DESC_MASK_LOW); tx_desc->pp22.buf_dma_addr_ptp &= cpu_to_le64(~0xffffff0000000000ULL); tx_desc->pp22.buf_dma_addr_ptp |= cpu_to_le64((ptpdesc >> 12) << 40); return true; } /* Handle tx fragmentation processing */ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, struct mvpp2_tx_queue *aggr_txq, struct mvpp2_tx_queue *txq) { unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); struct mvpp2_tx_desc *tx_desc; int i; dma_addr_t buf_dma_addr; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; void *addr = skb_frag_address(frag); tx_desc = mvpp2_txq_next_desc_get(aggr_txq); mvpp2_txdesc_clear_ptp(port, tx_desc); mvpp2_txdesc_txq_set(port, tx_desc, txq->id); mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag)); buf_dma_addr = dma_map_single(port->dev->dev.parent, addr, skb_frag_size(frag), DMA_TO_DEVICE); if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) { mvpp2_txq_desc_put(txq); goto cleanup; } mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); if (i == (skb_shinfo(skb)->nr_frags - 1)) { /* Last descriptor */ mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC); mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); } else { /* Descriptor in the middle: Not First, Not Last */ mvpp2_txdesc_cmd_set(port, tx_desc, 0); mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); } } return 0; cleanup: /* Release all descriptors that were used to map fragments of * this packet, as well as the corresponding DMA mappings */ for (i = i - 1; i >= 0; i--) { tx_desc = txq->descs + i; tx_desc_unmap_put(port, txq, tx_desc); } return -ENOMEM; } static inline void mvpp2_tso_put_hdr(struct sk_buff *skb, struct net_device *dev, struct mvpp2_tx_queue *txq, struct mvpp2_tx_queue *aggr_txq, struct mvpp2_txq_pcpu *txq_pcpu, int hdr_sz) { struct mvpp2_port *port = netdev_priv(dev); struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); dma_addr_t addr; mvpp2_txdesc_clear_ptp(port, tx_desc); mvpp2_txdesc_txq_set(port, tx_desc, txq->id); mvpp2_txdesc_size_set(port, tx_desc, hdr_sz); addr = txq_pcpu->tso_headers_dma + txq_pcpu->txq_put_index * TSO_HEADER_SIZE; mvpp2_txdesc_dma_addr_set(port, tx_desc, addr); mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) | MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE); mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); } static inline int mvpp2_tso_put_data(struct sk_buff *skb, struct net_device *dev, struct tso_t *tso, struct mvpp2_tx_queue *txq, struct mvpp2_tx_queue *aggr_txq, struct mvpp2_txq_pcpu *txq_pcpu, int sz, bool left, bool last) { struct mvpp2_port *port = netdev_priv(dev); struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); dma_addr_t buf_dma_addr; mvpp2_txdesc_clear_ptp(port, tx_desc); mvpp2_txdesc_txq_set(port, tx_desc, txq->id); mvpp2_txdesc_size_set(port, tx_desc, sz); buf_dma_addr = 
dma_map_single(dev->dev.parent, tso->data, sz, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { mvpp2_txq_desc_put(txq); return -ENOMEM; } mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); if (!left) { mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC); if (last) { mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); return 0; } } else { mvpp2_txdesc_cmd_set(port, tx_desc, 0); } mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); return 0; } static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, struct mvpp2_tx_queue *txq, struct mvpp2_tx_queue *aggr_txq, struct mvpp2_txq_pcpu *txq_pcpu) { struct mvpp2_port *port = netdev_priv(dev); int hdr_sz, i, len, descs = 0; struct tso_t tso; /* Check number of available descriptors */ if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) || mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, tso_count_descs(skb))) return 0; hdr_sz = tso_start(skb, &tso); len = skb->len - hdr_sz; while (len > 0) { int left = min_t(int, skb_shinfo(skb)->gso_size, len); char *hdr = txq_pcpu->tso_headers + txq_pcpu->txq_put_index * TSO_HEADER_SIZE; len -= left; descs++; tso_build_hdr(skb, hdr, &tso, left, len == 0); mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz); while (left > 0) { int sz = min_t(int, tso.size, left); left -= sz; descs++; if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq, txq_pcpu, sz, left, len == 0)) goto release; tso_build_data(skb, &tso, sz); } } return descs; release: for (i = descs - 1; i >= 0; i--) { struct mvpp2_tx_desc *tx_desc = txq->descs + i; tx_desc_unmap_put(port, txq, tx_desc); } return 0; } /* Main tx processing */ static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); struct mvpp2_tx_queue *txq, *aggr_txq; struct mvpp2_txq_pcpu *txq_pcpu; struct mvpp2_tx_desc *tx_desc; dma_addr_t buf_dma_addr; unsigned long flags = 0; unsigned int thread; int frags = 0; u16 txq_id; u32 tx_cmd; thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); txq_id = skb_get_queue_mapping(skb); txq = port->txqs[txq_id]; txq_pcpu = per_cpu_ptr(txq->pcpu, thread); aggr_txq = &port->priv->aggr_txqs[thread]; if (test_bit(thread, &port->priv->lock_map)) spin_lock_irqsave(&port->tx_lock[thread], flags); if (skb_is_gso(skb)) { frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); goto out; } frags = skb_shinfo(skb)->nr_frags + 1; /* Check number of available descriptors */ if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) || mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) { frags = 0; goto out; } /* Get a descriptor for the first part of the packet */ tx_desc = mvpp2_txq_next_desc_get(aggr_txq); if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) || !mvpp2_tx_hw_tstamp(port, tx_desc, skb)) mvpp2_txdesc_clear_ptp(port, tx_desc); mvpp2_txdesc_txq_set(port, tx_desc, txq->id); mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb)); buf_dma_addr = dma_map_single(dev->dev.parent, skb->data, skb_headlen(skb), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { mvpp2_txq_desc_put(txq); frags = 0; goto out; } mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); tx_cmd = mvpp2_skb_tx_csum(port, skb); if (frags == 1) { /* First and Last descriptor */ tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); } else { /* First but not Last */ tx_cmd |= 
MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); /* Continue with other skb fragments */ if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { tx_desc_unmap_put(port, txq, tx_desc); frags = 0; } } out: if (frags > 0) { struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread); struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); txq_pcpu->reserved_num -= frags; txq_pcpu->count += frags; aggr_txq->count += frags; /* Enable transmit */ wmb(); mvpp2_aggr_txq_pend_desc_add(port, frags); if (txq_pcpu->count >= txq_pcpu->stop_threshold) netif_tx_stop_queue(nq); u64_stats_update_begin(&stats->syncp); stats->tx_packets++; stats->tx_bytes += skb->len; u64_stats_update_end(&stats->syncp); } else { dev->stats.tx_dropped++; dev_kfree_skb_any(skb); } /* Finalize TX processing */ if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) mvpp2_txq_done(port, txq, txq_pcpu); /* Set the timer in case not all frags were processed */ if (!port->has_tx_irqs && txq_pcpu->count <= frags && txq_pcpu->count > 0) { struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread); if (!port_pcpu->timer_scheduled) { port_pcpu->timer_scheduled = true; hrtimer_start(&port_pcpu->tx_done_timer, MVPP2_TXDONE_HRTIMER_PERIOD_NS, HRTIMER_MODE_REL_PINNED_SOFT); } } if (test_bit(thread, &port->priv->lock_map)) spin_unlock_irqrestore(&port->tx_lock[thread], flags); return NETDEV_TX_OK; } static inline void mvpp2_cause_error(struct net_device *dev, int cause) { if (cause & MVPP2_CAUSE_FCS_ERR_MASK) netdev_err(dev, "FCS error\n"); if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK) netdev_err(dev, "rx fifo overrun error\n"); if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK) netdev_err(dev, "tx fifo underrun error\n"); } static int mvpp2_poll(struct napi_struct *napi, int budget) { u32 cause_rx_tx, cause_rx, cause_tx, cause_misc; int rx_done = 0; struct mvpp2_port *port = netdev_priv(napi->dev); struct mvpp2_queue_vector *qv; unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); qv = container_of(napi, struct mvpp2_queue_vector, napi); /* Rx/Tx cause register * * Bits 0-15: each bit indicates received packets on the Rx queue * (bit 0 is for Rx queue 0). * * Bits 16-23: each bit indicates transmitted packets on the Tx queue * (bit 16 is for Tx queue 0). 
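	 *
	 * As an illustration of the layout described above (a hypothetical
	 * value, not one read from real hardware), a cause word of 0x00010003
	 * would mean that Rx queues 0 and 1 have pending packets (bits 0 and
	 * 1) and that Tx queue 0 has completed transmissions (bit 16).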
* * Each CPU has its own Rx/Tx cause register */ cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id, MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; if (cause_misc) { mvpp2_cause_error(port->dev, cause_misc); /* Clear the cause register */ mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); mvpp2_thread_write(port->priv, thread, MVPP2_ISR_RX_TX_CAUSE_REG(port->id), cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); } if (port->has_tx_irqs) { cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; if (cause_tx) { cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); } } /* Process RX packets */ cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); cause_rx <<= qv->first_rxq; cause_rx |= qv->pending_cause_rx; while (cause_rx && budget > 0) { int count; struct mvpp2_rx_queue *rxq; rxq = mvpp2_get_rx_queue(port, cause_rx); if (!rxq) break; count = mvpp2_rx(port, napi, budget, rxq); rx_done += count; budget -= count; if (budget > 0) { /* Clear the bit associated to this Rx queue * so that next iteration will continue from * the next Rx queue. */ cause_rx &= ~(1 << rxq->logic_rxq); } } if (budget > 0) { cause_rx = 0; napi_complete_done(napi, rx_done); mvpp2_qvec_interrupt_enable(qv); } qv->pending_cause_rx = cause_rx; return rx_done; } static void mvpp22_mode_reconfigure(struct mvpp2_port *port, phy_interface_t interface) { u32 ctrl3; /* Set the GMAC & XLG MAC in reset */ mvpp2_mac_reset_assert(port); /* Set the MPCS and XPCS in reset */ mvpp22_pcs_reset_assert(port); /* comphy reconfiguration */ mvpp22_comphy_init(port, interface); /* gop reconfiguration */ mvpp22_gop_init(port, interface); mvpp22_pcs_reset_deassert(port, interface); if (mvpp2_port_supports_xlg(port)) { ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG); ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; if (mvpp2_is_xlg(interface)) ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G; else ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG); } if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(interface)) mvpp2_xlg_max_rx_size_set(port); else mvpp2_gmac_max_rx_size_set(port); } /* Set hw internals when starting port */ static void mvpp2_start_dev(struct mvpp2_port *port) { int i; mvpp2_txp_max_tx_size_set(port); for (i = 0; i < port->nqvecs; i++) napi_enable(&port->qvecs[i].napi); /* Enable interrupts on all threads */ mvpp2_interrupts_enable(port); if (port->priv->hw_version >= MVPP22) mvpp22_mode_reconfigure(port, port->phy_interface); if (port->phylink) { phylink_start(port->phylink); } else { mvpp2_acpi_start(port); } netif_tx_start_all_queues(port->dev); clear_bit(0, &port->state); } /* Set hw internals when stopping port */ static void mvpp2_stop_dev(struct mvpp2_port *port) { int i; set_bit(0, &port->state); /* Disable interrupts on all threads */ mvpp2_interrupts_disable(port); for (i = 0; i < port->nqvecs; i++) napi_disable(&port->qvecs[i].napi); if (port->phylink) phylink_stop(port->phylink); phy_power_off(port->comphy); } static int mvpp2_check_ringparam_valid(struct net_device *dev, struct ethtool_ringparam *ring) { u16 new_rx_pending = ring->rx_pending; u16 new_tx_pending = ring->tx_pending; if (ring->rx_pending == 0 || ring->tx_pending == 0) return -EINVAL; if (ring->rx_pending > MVPP2_MAX_RXD_MAX) new_rx_pending = MVPP2_MAX_RXD_MAX; else if (ring->rx_pending < MSS_THRESHOLD_START) new_rx_pending = MSS_THRESHOLD_START; else if 
(!IS_ALIGNED(ring->rx_pending, 16)) new_rx_pending = ALIGN(ring->rx_pending, 16); if (ring->tx_pending > MVPP2_MAX_TXD_MAX) new_tx_pending = MVPP2_MAX_TXD_MAX; else if (!IS_ALIGNED(ring->tx_pending, 32)) new_tx_pending = ALIGN(ring->tx_pending, 32); /* The Tx ring size cannot be smaller than the minimum number of * descriptors needed for TSO. */ if (new_tx_pending < MVPP2_MAX_SKB_DESCS) new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32); if (ring->rx_pending != new_rx_pending) { netdev_info(dev, "illegal Rx ring size value %d, round to %d\n", ring->rx_pending, new_rx_pending); ring->rx_pending = new_rx_pending; } if (ring->tx_pending != new_tx_pending) { netdev_info(dev, "illegal Tx ring size value %d, round to %d\n", ring->tx_pending, new_tx_pending); ring->tx_pending = new_tx_pending; } return 0; } static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr) { u32 mac_addr_l, mac_addr_m, mac_addr_h; mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG); mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE); mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH); addr[0] = (mac_addr_h >> 24) & 0xFF; addr[1] = (mac_addr_h >> 16) & 0xFF; addr[2] = (mac_addr_h >> 8) & 0xFF; addr[3] = mac_addr_h & 0xFF; addr[4] = mac_addr_m & 0xFF; addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF; } static int mvpp2_irqs_init(struct mvpp2_port *port) { int err, i; for (i = 0; i < port->nqvecs; i++) { struct mvpp2_queue_vector *qv = port->qvecs + i; if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { qv->mask = kzalloc(cpumask_size(), GFP_KERNEL); if (!qv->mask) { err = -ENOMEM; goto err; } irq_set_status_flags(qv->irq, IRQ_NO_BALANCING); } err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); if (err) goto err; if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { unsigned int cpu; for_each_present_cpu(cpu) { if (mvpp2_cpu_to_thread(port->priv, cpu) == qv->sw_thread_id) cpumask_set_cpu(cpu, qv->mask); } irq_set_affinity_hint(qv->irq, qv->mask); } } return 0; err: for (i = 0; i < port->nqvecs; i++) { struct mvpp2_queue_vector *qv = port->qvecs + i; irq_set_affinity_hint(qv->irq, NULL); kfree(qv->mask); qv->mask = NULL; free_irq(qv->irq, qv); } return err; } static void mvpp2_irqs_deinit(struct mvpp2_port *port) { int i; for (i = 0; i < port->nqvecs; i++) { struct mvpp2_queue_vector *qv = port->qvecs + i; irq_set_affinity_hint(qv->irq, NULL); kfree(qv->mask); qv->mask = NULL; irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING); free_irq(qv->irq, qv); } } static bool mvpp22_rss_is_supported(struct mvpp2_port *port) { return (queue_mode == MVPP2_QDIST_MULTI_MODE) && !(port->flags & MVPP2_F_LOOPBACK); } static int mvpp2_open(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); struct mvpp2 *priv = port->priv; unsigned char mac_bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; bool valid = false; int err; err = mvpp2_prs_mac_da_accept(port, mac_bcast, true); if (err) { netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n"); return err; } err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true); if (err) { netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n"); return err; } err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH); if (err) { netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n"); return err; } err = mvpp2_prs_def_flow(port); if (err) { netdev_err(dev, "mvpp2_prs_def_flow failed\n"); return err; } /* Allocate the Rx/Tx queues */ err = mvpp2_setup_rxqs(port); if (err) { netdev_err(port->dev, "cannot allocate Rx queues\n"); 
return err; } err = mvpp2_setup_txqs(port); if (err) { netdev_err(port->dev, "cannot allocate Tx queues\n"); goto err_cleanup_rxqs; } err = mvpp2_irqs_init(port); if (err) { netdev_err(port->dev, "cannot init IRQs\n"); goto err_cleanup_txqs; } if (port->phylink) { err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0); if (err) { netdev_err(port->dev, "could not attach PHY (%d)\n", err); goto err_free_irq; } valid = true; } if (priv->hw_version >= MVPP22 && port->port_irq) { err = request_irq(port->port_irq, mvpp2_port_isr, 0, dev->name, port); if (err) { netdev_err(port->dev, "cannot request port link/ptp IRQ %d\n", port->port_irq); goto err_free_irq; } mvpp22_gop_setup_irq(port); /* In default link is down */ netif_carrier_off(port->dev); valid = true; } else { port->port_irq = 0; } if (!valid) { netdev_err(port->dev, "invalid configuration: no dt or link IRQ"); err = -ENOENT; goto err_free_irq; } /* Unmask interrupts on all CPUs */ on_each_cpu(mvpp2_interrupts_unmask, port, 1); mvpp2_shared_interrupt_mask_unmask(port, false); mvpp2_start_dev(port); /* Start hardware statistics gathering */ queue_delayed_work(priv->stats_queue, &port->stats_work, MVPP2_MIB_COUNTERS_STATS_DELAY); return 0; err_free_irq: mvpp2_irqs_deinit(port); err_cleanup_txqs: mvpp2_cleanup_txqs(port); err_cleanup_rxqs: mvpp2_cleanup_rxqs(port); return err; } static int mvpp2_stop(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); struct mvpp2_port_pcpu *port_pcpu; unsigned int thread; mvpp2_stop_dev(port); /* Mask interrupts on all threads */ on_each_cpu(mvpp2_interrupts_mask, port, 1); mvpp2_shared_interrupt_mask_unmask(port, true); if (port->phylink) phylink_disconnect_phy(port->phylink); if (port->port_irq) free_irq(port->port_irq, port); mvpp2_irqs_deinit(port); if (!port->has_tx_irqs) { for (thread = 0; thread < port->priv->nthreads; thread++) { port_pcpu = per_cpu_ptr(port->pcpu, thread); hrtimer_cancel(&port_pcpu->tx_done_timer); port_pcpu->timer_scheduled = false; } } mvpp2_cleanup_rxqs(port); mvpp2_cleanup_txqs(port); cancel_delayed_work_sync(&port->stats_work); mvpp2_mac_reset_assert(port); mvpp22_pcs_reset_assert(port); return 0; } static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port, struct netdev_hw_addr_list *list) { struct netdev_hw_addr *ha; int ret; netdev_hw_addr_list_for_each(ha, list) { ret = mvpp2_prs_mac_da_accept(port, ha->addr, true); if (ret) return ret; } return 0; } static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable) { if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) mvpp2_prs_vid_enable_filtering(port); else mvpp2_prs_vid_disable_filtering(port); mvpp2_prs_mac_promisc_set(port->priv, port->id, MVPP2_PRS_L2_UNI_CAST, enable); mvpp2_prs_mac_promisc_set(port->priv, port->id, MVPP2_PRS_L2_MULTI_CAST, enable); } static void mvpp2_set_rx_mode(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); /* Clear the whole UC and MC list */ mvpp2_prs_mac_del_all(port); if (dev->flags & IFF_PROMISC) { mvpp2_set_rx_promisc(port, true); return; } mvpp2_set_rx_promisc(port, false); if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX || mvpp2_prs_mac_da_accept_list(port, &dev->uc)) mvpp2_prs_mac_promisc_set(port->priv, port->id, MVPP2_PRS_L2_UNI_CAST, true); if (dev->flags & IFF_ALLMULTI) { mvpp2_prs_mac_promisc_set(port->priv, port->id, MVPP2_PRS_L2_MULTI_CAST, true); return; } if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX || mvpp2_prs_mac_da_accept_list(port, &dev->mc)) mvpp2_prs_mac_promisc_set(port->priv, 
port->id, MVPP2_PRS_L2_MULTI_CAST, true); } static int mvpp2_set_mac_address(struct net_device *dev, void *p) { const struct sockaddr *addr = p; int err; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; err = mvpp2_prs_update_mac_da(dev, addr->sa_data); if (err) { /* Reconfigure parser accept the original MAC address */ mvpp2_prs_update_mac_da(dev, dev->dev_addr); netdev_err(dev, "failed to change MAC address\n"); } return err; } /* Shut down all the ports, reconfigure the pools as percpu or shared, * then bring up again all ports. */ static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu) { bool change_percpu = (percpu != priv->percpu_pools); int numbufs = MVPP2_BM_POOLS_NUM, i; struct mvpp2_port *port = NULL; bool status[MVPP2_MAX_PORTS]; for (i = 0; i < priv->port_count; i++) { port = priv->port_list[i]; status[i] = netif_running(port->dev); if (status[i]) mvpp2_stop(port->dev); } /* nrxqs is the same for all ports */ if (priv->percpu_pools) numbufs = port->nrxqs * 2; if (change_percpu) mvpp2_bm_pool_update_priv_fc(priv, false); for (i = 0; i < numbufs; i++) mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]); devm_kfree(port->dev->dev.parent, priv->bm_pools); priv->percpu_pools = percpu; mvpp2_bm_init(port->dev->dev.parent, priv); for (i = 0; i < priv->port_count; i++) { port = priv->port_list[i]; if (percpu && port->ntxqs >= num_possible_cpus() * 2) xdp_set_features_flag(port->dev, NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_NDO_XMIT); else xdp_clear_features_flag(port->dev); mvpp2_swf_bm_pool_init(port); if (status[i]) mvpp2_open(port->dev); } if (change_percpu) mvpp2_bm_pool_update_priv_fc(priv, true); return 0; } static int mvpp2_change_mtu(struct net_device *dev, int mtu) { struct mvpp2_port *port = netdev_priv(dev); bool running = netif_running(dev); struct mvpp2 *priv = port->priv; int err; if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) { netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu, ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8)); mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); } if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) { netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n", mtu, (int)MVPP2_MAX_RX_BUF_SIZE); return -EINVAL; } if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) { if (priv->percpu_pools) { netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu); mvpp2_bm_switch_buffers(priv, false); } } else { bool jumbo = false; int i; for (i = 0; i < priv->port_count; i++) if (priv->port_list[i] != port && MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) > MVPP2_BM_LONG_PKT_SIZE) { jumbo = true; break; } /* No port is using jumbo frames */ if (!jumbo) { dev_info(port->dev->dev.parent, "all ports have a low MTU, switching to per-cpu buffers"); mvpp2_bm_switch_buffers(priv, true); } } if (running) mvpp2_stop_dev(port); err = mvpp2_bm_update_mtu(dev, mtu); if (err) { netdev_err(dev, "failed to change MTU\n"); /* Reconfigure BM to the original MTU */ mvpp2_bm_update_mtu(dev, dev->mtu); } else { port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); } if (running) { mvpp2_start_dev(port); mvpp2_egress_enable(port); mvpp2_ingress_enable(port); } return err; } static int mvpp2_check_pagepool_dma(struct mvpp2_port *port) { enum dma_data_direction dma_dir = DMA_FROM_DEVICE; struct mvpp2 *priv = port->priv; int err = -1, i; if (!priv->percpu_pools) return err; if (!priv->page_pool[0]) return -ENOMEM; for (i = 0; i < priv->port_count; i++) { port = priv->port_list[i]; if (port->xdp_prog) { dma_dir = 
DMA_BIDIRECTIONAL; break; } } /* All pools are equal in terms of DMA direction */ if (priv->page_pool[0]->p.dma_dir != dma_dir) err = mvpp2_bm_switch_buffers(priv, true); return err; } static void mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mvpp2_port *port = netdev_priv(dev); unsigned int start; unsigned int cpu; for_each_possible_cpu(cpu) { struct mvpp2_pcpu_stats *cpu_stats; u64 rx_packets; u64 rx_bytes; u64 tx_packets; u64 tx_bytes; cpu_stats = per_cpu_ptr(port->stats, cpu); do { start = u64_stats_fetch_begin(&cpu_stats->syncp); rx_packets = cpu_stats->rx_packets; rx_bytes = cpu_stats->rx_bytes; tx_packets = cpu_stats->tx_packets; tx_bytes = cpu_stats->tx_bytes; } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; stats->tx_packets += tx_packets; stats->tx_bytes += tx_bytes; } stats->rx_errors = dev->stats.rx_errors; stats->rx_dropped = dev->stats.rx_dropped; stats->tx_dropped = dev->stats.tx_dropped; } static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr) { struct hwtstamp_config config; void __iomem *ptp; u32 gcr, int_mask; if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; if (config.tx_type != HWTSTAMP_TX_OFF && config.tx_type != HWTSTAMP_TX_ON) return -ERANGE; ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); int_mask = gcr = 0; if (config.tx_type != HWTSTAMP_TX_OFF) { gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET; int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 | MVPP22_PTP_INT_MASK_QUEUE0; } /* It seems we must also release the TX reset when enabling the TSU */ if (config.rx_filter != HWTSTAMP_FILTER_NONE) gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET | MVPP22_PTP_GCR_TX_RESET; if (gcr & MVPP22_PTP_GCR_TSU_ENABLE) mvpp22_tai_start(port->priv->tai); if (config.rx_filter != HWTSTAMP_FILTER_NONE) { config.rx_filter = HWTSTAMP_FILTER_ALL; mvpp2_modify(ptp + MVPP22_PTP_GCR, MVPP22_PTP_GCR_RX_RESET | MVPP22_PTP_GCR_TX_RESET | MVPP22_PTP_GCR_TSU_ENABLE, gcr); port->rx_hwtstamp = true; } else { port->rx_hwtstamp = false; mvpp2_modify(ptp + MVPP22_PTP_GCR, MVPP22_PTP_GCR_RX_RESET | MVPP22_PTP_GCR_TX_RESET | MVPP22_PTP_GCR_TSU_ENABLE, gcr); } mvpp2_modify(ptp + MVPP22_PTP_INT_MASK, MVPP22_PTP_INT_MASK_QUEUE1 | MVPP22_PTP_INT_MASK_QUEUE0, int_mask); if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE)) mvpp22_tai_stop(port->priv->tai); port->tx_hwtstamp_type = config.tx_type; if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) return -EFAULT; return 0; } static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr) { struct hwtstamp_config config; memset(&config, 0, sizeof(config)); config.tx_type = port->tx_hwtstamp_type; config.rx_filter = port->rx_hwtstamp ? 
HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) return -EFAULT; return 0; } static int mvpp2_ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { struct mvpp2_port *port = netdev_priv(dev); if (!port->hwtstamp) return -EOPNOTSUPP; info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai); info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); return 0; } static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mvpp2_port *port = netdev_priv(dev); switch (cmd) { case SIOCSHWTSTAMP: if (port->hwtstamp) return mvpp2_set_ts_config(port, ifr); break; case SIOCGHWTSTAMP: if (port->hwtstamp) return mvpp2_get_ts_config(port, ifr); break; } if (!port->phylink) return -ENOTSUPP; return phylink_mii_ioctl(port->phylink, ifr, cmd); } static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { struct mvpp2_port *port = netdev_priv(dev); int ret; ret = mvpp2_prs_vid_entry_add(port, vid); if (ret) netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n", MVPP2_PRS_VLAN_FILT_MAX - 1); return ret; } static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) { struct mvpp2_port *port = netdev_priv(dev); mvpp2_prs_vid_entry_remove(port, vid); return 0; } static int mvpp2_set_features(struct net_device *dev, netdev_features_t features) { netdev_features_t changed = dev->features ^ features; struct mvpp2_port *port = netdev_priv(dev); if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { mvpp2_prs_vid_enable_filtering(port); } else { /* Invalidate all registered VID filters for this * port */ mvpp2_prs_vid_remove_all(port); mvpp2_prs_vid_disable_filtering(port); } } if (changed & NETIF_F_RXHASH) { if (features & NETIF_F_RXHASH) mvpp22_port_rss_enable(port); else mvpp22_port_rss_disable(port); } return 0; } static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf) { struct bpf_prog *prog = bpf->prog, *old_prog; bool running = netif_running(port->dev); bool reset = !prog != !port->xdp_prog; if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) { NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP"); return -EOPNOTSUPP; } if (!port->priv->percpu_pools) { NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP"); return -EOPNOTSUPP; } if (port->ntxqs < num_possible_cpus() * 2) { NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU"); return -EOPNOTSUPP; } /* device is up and bpf is added/removed, must setup the RX queues */ if (running && reset) mvpp2_stop(port->dev); old_prog = xchg(&port->xdp_prog, prog); if (old_prog) bpf_prog_put(old_prog); /* bpf is just replaced, RXQ and MTU are already setup */ if (!reset) return 0; /* device was up, restore the link */ if (running) mvpp2_open(port->dev); /* Check Page Pool DMA Direction */ mvpp2_check_pagepool_dma(port); return 0; } static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp) { struct mvpp2_port *port = netdev_priv(dev); switch (xdp->command) { case XDP_SETUP_PROG: return mvpp2_xdp_setup(port, xdp); default: return -EINVAL; } } /* Ethtool methods */ static int mvpp2_ethtool_nway_reset(struct net_device *dev) { struct 
mvpp2_port *port = netdev_priv(dev); if (!port->phylink) return -ENOTSUPP; return phylink_ethtool_nway_reset(port->phylink); } /* Set interrupt coalescing for ethtools */ static int mvpp2_ethtool_set_coalesce(struct net_device *dev, struct ethtool_coalesce *c, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct mvpp2_port *port = netdev_priv(dev); int queue; for (queue = 0; queue < port->nrxqs; queue++) { struct mvpp2_rx_queue *rxq = port->rxqs[queue]; rxq->time_coal = c->rx_coalesce_usecs; rxq->pkts_coal = c->rx_max_coalesced_frames; mvpp2_rx_pkts_coal_set(port, rxq); mvpp2_rx_time_coal_set(port, rxq); } if (port->has_tx_irqs) { port->tx_time_coal = c->tx_coalesce_usecs; mvpp2_tx_time_coal_set(port); } for (queue = 0; queue < port->ntxqs; queue++) { struct mvpp2_tx_queue *txq = port->txqs[queue]; txq->done_pkts_coal = c->tx_max_coalesced_frames; if (port->has_tx_irqs) mvpp2_tx_pkts_coal_set(port, txq); } return 0; } /* get coalescing for ethtools */ static int mvpp2_ethtool_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct mvpp2_port *port = netdev_priv(dev); c->rx_coalesce_usecs = port->rxqs[0]->time_coal; c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal; c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal; c->tx_coalesce_usecs = port->tx_time_coal; return 0; } static void mvpp2_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { strscpy(drvinfo->driver, MVPP2_DRIVER_NAME, sizeof(drvinfo->driver)); strscpy(drvinfo->version, MVPP2_DRIVER_VERSION, sizeof(drvinfo->version)); strscpy(drvinfo->bus_info, dev_name(&dev->dev), sizeof(drvinfo->bus_info)); } static void mvpp2_ethtool_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct mvpp2_port *port = netdev_priv(dev); ring->rx_max_pending = MVPP2_MAX_RXD_MAX; ring->tx_max_pending = MVPP2_MAX_TXD_MAX; ring->rx_pending = port->rx_ring_size; ring->tx_pending = port->tx_ring_size; } static int mvpp2_ethtool_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct mvpp2_port *port = netdev_priv(dev); u16 prev_rx_ring_size = port->rx_ring_size; u16 prev_tx_ring_size = port->tx_ring_size; int err; err = mvpp2_check_ringparam_valid(dev, ring); if (err) return err; if (!netif_running(dev)) { port->rx_ring_size = ring->rx_pending; port->tx_ring_size = ring->tx_pending; return 0; } /* The interface is running, so we have to force a * reallocation of the queues */ mvpp2_stop_dev(port); mvpp2_cleanup_rxqs(port); mvpp2_cleanup_txqs(port); port->rx_ring_size = ring->rx_pending; port->tx_ring_size = ring->tx_pending; err = mvpp2_setup_rxqs(port); if (err) { /* Reallocate Rx queues with the original ring size */ port->rx_ring_size = prev_rx_ring_size; ring->rx_pending = prev_rx_ring_size; err = mvpp2_setup_rxqs(port); if (err) goto err_out; } err = mvpp2_setup_txqs(port); if (err) { /* Reallocate Tx queues with the original ring size */ port->tx_ring_size = prev_tx_ring_size; ring->tx_pending = prev_tx_ring_size; err = mvpp2_setup_txqs(port); if (err) goto err_clean_rxqs; } mvpp2_start_dev(port); mvpp2_egress_enable(port); mvpp2_ingress_enable(port); return 0; err_clean_rxqs: mvpp2_cleanup_rxqs(port); err_out: netdev_err(dev, "failed to change ring parameters"); return err; } static void 
mvpp2_ethtool_get_pause_param(struct net_device *dev, struct ethtool_pauseparam *pause) { struct mvpp2_port *port = netdev_priv(dev); if (!port->phylink) return; phylink_ethtool_get_pauseparam(port->phylink, pause); } static int mvpp2_ethtool_set_pause_param(struct net_device *dev, struct ethtool_pauseparam *pause) { struct mvpp2_port *port = netdev_priv(dev); if (!port->phylink) return -ENOTSUPP; return phylink_ethtool_set_pauseparam(port->phylink, pause); } static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct mvpp2_port *port = netdev_priv(dev); if (!port->phylink) return -ENOTSUPP; return phylink_ethtool_ksettings_get(port->phylink, cmd); } static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct mvpp2_port *port = netdev_priv(dev); if (!port->phylink) return -ENOTSUPP; return phylink_ethtool_ksettings_set(port->phylink, cmd); } static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rules) { struct mvpp2_port *port = netdev_priv(dev); int ret = 0, i, loc = 0; if (!mvpp22_rss_is_supported(port)) return -EOPNOTSUPP; switch (info->cmd) { case ETHTOOL_GRXFH: ret = mvpp2_ethtool_rxfh_get(port, info); break; case ETHTOOL_GRXRINGS: info->data = port->nrxqs; break; case ETHTOOL_GRXCLSRLCNT: info->rule_cnt = port->n_rfs_rules; break; case ETHTOOL_GRXCLSRULE: ret = mvpp2_ethtool_cls_rule_get(port, info); break; case ETHTOOL_GRXCLSRLALL: for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) { if (loc == info->rule_cnt) { ret = -EMSGSIZE; break; } if (port->rfs_rules[i]) rules[loc++] = i; } break; default: return -ENOTSUPP; } return ret; } static int mvpp2_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) { struct mvpp2_port *port = netdev_priv(dev); int ret = 0; if (!mvpp22_rss_is_supported(port)) return -EOPNOTSUPP; switch (info->cmd) { case ETHTOOL_SRXFH: ret = mvpp2_ethtool_rxfh_set(port, info); break; case ETHTOOL_SRXCLSRLINS: ret = mvpp2_ethtool_cls_rule_ins(port, info); break; case ETHTOOL_SRXCLSRLDEL: ret = mvpp2_ethtool_cls_rule_del(port, info); break; default: return -EOPNOTSUPP; } return ret; } static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); return mvpp22_rss_is_supported(port) ? 
MVPP22_RSS_TABLE_ENTRIES : 0; } static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) { struct mvpp2_port *port = netdev_priv(dev); int ret = 0; if (!mvpp22_rss_is_supported(port)) return -EOPNOTSUPP; if (indir) ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir); if (hfunc) *hfunc = ETH_RSS_HASH_CRC32; return ret; } static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) { struct mvpp2_port *port = netdev_priv(dev); int ret = 0; if (!mvpp22_rss_is_supported(port)) return -EOPNOTSUPP; if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32) return -EOPNOTSUPP; if (key) return -EOPNOTSUPP; if (indir) ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir); return ret; } static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc, u32 rss_context) { struct mvpp2_port *port = netdev_priv(dev); int ret = 0; if (!mvpp22_rss_is_supported(port)) return -EOPNOTSUPP; if (rss_context >= MVPP22_N_RSS_TABLES) return -EINVAL; if (hfunc) *hfunc = ETH_RSS_HASH_CRC32; if (indir) ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir); return ret; } static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc, u32 *rss_context, bool delete) { struct mvpp2_port *port = netdev_priv(dev); int ret; if (!mvpp22_rss_is_supported(port)) return -EOPNOTSUPP; if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32) return -EOPNOTSUPP; if (key) return -EOPNOTSUPP; if (delete) return mvpp22_port_rss_ctx_delete(port, *rss_context); if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { ret = mvpp22_port_rss_ctx_create(port, rss_context); if (ret) return ret; } return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir); } /* Device ops */ static const struct net_device_ops mvpp2_netdev_ops = { .ndo_open = mvpp2_open, .ndo_stop = mvpp2_stop, .ndo_start_xmit = mvpp2_tx, .ndo_set_rx_mode = mvpp2_set_rx_mode, .ndo_set_mac_address = mvpp2_set_mac_address, .ndo_change_mtu = mvpp2_change_mtu, .ndo_get_stats64 = mvpp2_get_stats64, .ndo_eth_ioctl = mvpp2_ioctl, .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid, .ndo_set_features = mvpp2_set_features, .ndo_bpf = mvpp2_xdp, .ndo_xdp_xmit = mvpp2_xdp_xmit, }; static const struct ethtool_ops mvpp2_eth_tool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES, .nway_reset = mvpp2_ethtool_nway_reset, .get_link = ethtool_op_get_link, .get_ts_info = mvpp2_ethtool_get_ts_info, .set_coalesce = mvpp2_ethtool_set_coalesce, .get_coalesce = mvpp2_ethtool_get_coalesce, .get_drvinfo = mvpp2_ethtool_get_drvinfo, .get_ringparam = mvpp2_ethtool_get_ringparam, .set_ringparam = mvpp2_ethtool_set_ringparam, .get_strings = mvpp2_ethtool_get_strings, .get_ethtool_stats = mvpp2_ethtool_get_stats, .get_sset_count = mvpp2_ethtool_get_sset_count, .get_pauseparam = mvpp2_ethtool_get_pause_param, .set_pauseparam = mvpp2_ethtool_set_pause_param, .get_link_ksettings = mvpp2_ethtool_get_link_ksettings, .set_link_ksettings = mvpp2_ethtool_set_link_ksettings, .get_rxnfc = mvpp2_ethtool_get_rxnfc, .set_rxnfc = mvpp2_ethtool_set_rxnfc, .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size, .get_rxfh = mvpp2_ethtool_get_rxfh, .set_rxfh = mvpp2_ethtool_set_rxfh, .get_rxfh_context = mvpp2_ethtool_get_rxfh_context, .set_rxfh_context = mvpp2_ethtool_set_rxfh_context, }; /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that * had a 
single IRQ defined per-port. */ static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port, struct device_node *port_node) { struct mvpp2_queue_vector *v = &port->qvecs[0]; v->first_rxq = 0; v->nrxqs = port->nrxqs; v->type = MVPP2_QUEUE_VECTOR_SHARED; v->sw_thread_id = 0; v->sw_thread_mask = *cpumask_bits(cpu_online_mask); v->port = port; v->irq = irq_of_parse_and_map(port_node, 0); if (v->irq <= 0) return -EINVAL; netif_napi_add(port->dev, &v->napi, mvpp2_poll); port->nqvecs = 1; return 0; } static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, struct device_node *port_node) { struct mvpp2 *priv = port->priv; struct mvpp2_queue_vector *v; int i, ret; switch (queue_mode) { case MVPP2_QDIST_SINGLE_MODE: port->nqvecs = priv->nthreads + 1; break; case MVPP2_QDIST_MULTI_MODE: port->nqvecs = priv->nthreads; break; } for (i = 0; i < port->nqvecs; i++) { char irqname[16]; v = port->qvecs + i; v->port = port; v->type = MVPP2_QUEUE_VECTOR_PRIVATE; v->sw_thread_id = i; v->sw_thread_mask = BIT(i); if (port->flags & MVPP2_F_DT_COMPAT) snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); else snprintf(irqname, sizeof(irqname), "hif%d", i); if (queue_mode == MVPP2_QDIST_MULTI_MODE) { v->first_rxq = i; v->nrxqs = 1; } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE && i == (port->nqvecs - 1)) { v->first_rxq = 0; v->nrxqs = port->nrxqs; v->type = MVPP2_QUEUE_VECTOR_SHARED; if (port->flags & MVPP2_F_DT_COMPAT) strncpy(irqname, "rx-shared", sizeof(irqname)); } if (port_node) v->irq = of_irq_get_byname(port_node, irqname); else v->irq = fwnode_irq_get(port->fwnode, i); if (v->irq <= 0) { ret = -EINVAL; goto err; } netif_napi_add(port->dev, &v->napi, mvpp2_poll); } return 0; err: for (i = 0; i < port->nqvecs; i++) irq_dispose_mapping(port->qvecs[i].irq); return ret; } static int mvpp2_queue_vectors_init(struct mvpp2_port *port, struct device_node *port_node) { if (port->has_tx_irqs) return mvpp2_multi_queue_vectors_init(port, port_node); else return mvpp2_simple_queue_vectors_init(port, port_node); } static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port) { int i; for (i = 0; i < port->nqvecs; i++) irq_dispose_mapping(port->qvecs[i].irq); } /* Configure Rx queue group interrupt for this port */ static void mvpp2_rx_irqs_setup(struct mvpp2_port *port) { struct mvpp2 *priv = port->priv; u32 val; int i; if (priv->hw_version == MVPP21) { mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), port->nrxqs); return; } /* Handle the more complicated PPv2.2 and PPv2.3 case */ for (i = 0; i < port->nqvecs; i++) { struct mvpp2_queue_vector *qv = port->qvecs + i; if (!qv->nrxqs) continue; val = qv->sw_thread_id; val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET; mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); val = qv->first_rxq; val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET; mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); } } /* Initialize port HW */ static int mvpp2_port_init(struct mvpp2_port *port) { struct device *dev = port->dev->dev.parent; struct mvpp2 *priv = port->priv; struct mvpp2_txq_pcpu *txq_pcpu; unsigned int thread; int queue, err, val; /* Checks for hardware constraints */ if (port->first_rxq + port->nrxqs > MVPP2_MAX_PORTS * priv->max_port_rxqs) return -EINVAL; if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ) return -EINVAL; /* Disable port */ mvpp2_egress_disable(port); mvpp2_port_disable(port); if (mvpp2_is_xlg(port->phy_interface)) { val = readl(port->base + MVPP22_XLG_CTRL0_REG); val &= 
~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; writel(val, port->base + MVPP22_XLG_CTRL0_REG); } else { val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); val &= ~MVPP2_GMAC_FORCE_LINK_PASS; val |= MVPP2_GMAC_FORCE_LINK_DOWN; writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); } port->tx_time_coal = MVPP2_TXDONE_COAL_USEC; port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs), GFP_KERNEL); if (!port->txqs) return -ENOMEM; /* Associate physical Tx queues to this port and initialize. * The mapping is predefined. */ for (queue = 0; queue < port->ntxqs; queue++) { int queue_phy_id = mvpp2_txq_phys(port->id, queue); struct mvpp2_tx_queue *txq; txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL); if (!txq) { err = -ENOMEM; goto err_free_percpu; } txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu); if (!txq->pcpu) { err = -ENOMEM; goto err_free_percpu; } txq->id = queue_phy_id; txq->log_id = queue; txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; for (thread = 0; thread < priv->nthreads; thread++) { txq_pcpu = per_cpu_ptr(txq->pcpu, thread); txq_pcpu->thread = thread; } port->txqs[queue] = txq; } port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs), GFP_KERNEL); if (!port->rxqs) { err = -ENOMEM; goto err_free_percpu; } /* Allocate and initialize Rx queue for this port */ for (queue = 0; queue < port->nrxqs; queue++) { struct mvpp2_rx_queue *rxq; /* Map physical Rx queue to port's logical Rx queue */ rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); if (!rxq) { err = -ENOMEM; goto err_free_percpu; } /* Map this Rx queue to a physical queue */ rxq->id = port->first_rxq + queue; rxq->port = port->id; rxq->logic_rxq = queue; port->rxqs[queue] = rxq; } mvpp2_rx_irqs_setup(port); /* Create Rx descriptor rings */ for (queue = 0; queue < port->nrxqs; queue++) { struct mvpp2_rx_queue *rxq = port->rxqs[queue]; rxq->size = port->rx_ring_size; rxq->pkts_coal = MVPP2_RX_COAL_PKTS; rxq->time_coal = MVPP2_RX_COAL_USEC; } mvpp2_ingress_disable(port); /* Port default configuration */ mvpp2_defaults_set(port); /* Port's classifier configuration */ mvpp2_cls_oversize_rxq_set(port); mvpp2_cls_port_config(port); if (mvpp22_rss_is_supported(port)) mvpp22_port_rss_init(port); /* Provide an initial Rx packet size */ port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu); /* Initialize pools for swf */ err = mvpp2_swf_bm_pool_init(port); if (err) goto err_free_percpu; /* Clear all port stats */ mvpp2_read_stats(port); memset(port->ethtool_stats, 0, MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64)); return 0; err_free_percpu: for (queue = 0; queue < port->ntxqs; queue++) { if (!port->txqs[queue]) continue; free_percpu(port->txqs[queue]->pcpu); } return err; } static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node, unsigned long *flags) { char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2", "tx-cpu3" }; int i; for (i = 0; i < 5; i++) if (of_property_match_string(port_node, "interrupt-names", irqs[i]) < 0) return false; *flags |= MVPP2_F_DT_COMPAT; return true; } /* Checks if the port dt description has the required Tx interrupts: * - PPv2.1: there are no such interrupts. * - PPv2.2 and PPv2.3: * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3] * - The new ones have: "hifX" with X in [0..8] * * All those variants are supported to keep the backward compatibility. 
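 *
 * As a purely illustrative example (not taken from a real DTS), a new-style
 * port node would carry:
 *   interrupt-names = "hif0", "hif1", "hif2", "hif3", "hif4",
 *                     "hif5", "hif6", "hif7", "hif8";
 * while an old-style one would use "rx-shared" plus "tx-cpu0".."tx-cpu3".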
*/ static bool mvpp2_port_has_irqs(struct mvpp2 *priv, struct device_node *port_node, unsigned long *flags) { char name[5]; int i; /* ACPI */ if (!port_node) return true; if (priv->hw_version == MVPP21) return false; if (mvpp22_port_has_legacy_tx_irqs(port_node, flags)) return true; for (i = 0; i < MVPP2_MAX_THREADS; i++) { snprintf(name, 5, "hif%d", i); if (of_property_match_string(port_node, "interrupt-names", name) < 0) return false; } return true; } static int mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, struct fwnode_handle *fwnode, char **mac_from) { struct mvpp2_port *port = netdev_priv(dev); char hw_mac_addr[ETH_ALEN] = {0}; char fw_mac_addr[ETH_ALEN]; int ret; if (!fwnode_get_mac_address(fwnode, fw_mac_addr)) { *mac_from = "firmware node"; eth_hw_addr_set(dev, fw_mac_addr); return 0; } if (priv->hw_version == MVPP21) { mvpp21_get_mac_address(port, hw_mac_addr); if (is_valid_ether_addr(hw_mac_addr)) { *mac_from = "hardware"; eth_hw_addr_set(dev, hw_mac_addr); return 0; } } /* Only valid on OF enabled platforms */ ret = of_get_mac_address_nvmem(to_of_node(fwnode), fw_mac_addr); if (ret == -EPROBE_DEFER) return ret; if (!ret) { *mac_from = "nvmem cell"; eth_hw_addr_set(dev, fw_mac_addr); return 0; } *mac_from = "random"; eth_hw_addr_random(dev); return 0; } static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config) { return container_of(config, struct mvpp2_port, phylink_config); } static struct mvpp2_port *mvpp2_pcs_xlg_to_port(struct phylink_pcs *pcs) { return container_of(pcs, struct mvpp2_port, pcs_xlg); } static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs) { return container_of(pcs, struct mvpp2_port, pcs_gmac); } static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs, struct phylink_link_state *state) { struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs); u32 val; if (port->phy_interface == PHY_INTERFACE_MODE_5GBASER) state->speed = SPEED_5000; else state->speed = SPEED_10000; state->duplex = 1; state->an_complete = 1; val = readl(port->base + MVPP22_XLG_STATUS); state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP); state->pause = 0; val = readl(port->base + MVPP22_XLG_CTRL0_REG); if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN) state->pause |= MLO_PAUSE_TX; if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN) state->pause |= MLO_PAUSE_RX; } static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, const unsigned long *advertising, bool permit_pause_to_mac) { return 0; } static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = { .pcs_get_state = mvpp2_xlg_pcs_get_state, .pcs_config = mvpp2_xlg_pcs_config, }; static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs, unsigned long *supported, const struct phylink_link_state *state) { /* When in 802.3z mode, we must have AN enabled: * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... * When <PortType> = 1 (1000BASE-X) this field must be set to 1. 
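	 *
	 * In practice this means that 1000BASE-X/2500BASE-X links are only
	 * accepted here when Autoneg is part of the advertisement, which is
	 * what the check below enforces.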
*/ if (phy_interface_mode_is_8023z(state->interface) && !phylink_test(state->advertising, Autoneg)) return -EINVAL; return 0; } static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs, struct phylink_link_state *state) { struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); u32 val; val = readl(port->base + MVPP2_GMAC_STATUS0); state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE); state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP); state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX); switch (port->phy_interface) { case PHY_INTERFACE_MODE_1000BASEX: state->speed = SPEED_1000; break; case PHY_INTERFACE_MODE_2500BASEX: state->speed = SPEED_2500; break; default: if (val & MVPP2_GMAC_STATUS0_GMII_SPEED) state->speed = SPEED_1000; else if (val & MVPP2_GMAC_STATUS0_MII_SPEED) state->speed = SPEED_100; else state->speed = SPEED_10; } state->pause = 0; if (val & MVPP2_GMAC_STATUS0_RX_PAUSE) state->pause |= MLO_PAUSE_RX; if (val & MVPP2_GMAC_STATUS0_TX_PAUSE) state->pause |= MLO_PAUSE_TX; } static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, const unsigned long *advertising, bool permit_pause_to_mac) { struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); u32 mask, val, an, old_an, changed; mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | MVPP2_GMAC_IN_BAND_AUTONEG | MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG | MVPP2_GMAC_AN_DUPLEX_EN; if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) { mask |= MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | MVPP2_GMAC_CONFIG_FULL_DUPLEX; val = MVPP2_GMAC_IN_BAND_AUTONEG; if (interface == PHY_INTERFACE_MODE_SGMII) { /* SGMII mode receives the speed and duplex from PHY */ val |= MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_AN_DUPLEX_EN; } else { /* 802.3z mode has fixed speed and duplex */ val |= MVPP2_GMAC_CONFIG_GMII_SPEED | MVPP2_GMAC_CONFIG_FULL_DUPLEX; /* The FLOW_CTRL_AUTONEG bit selects either the hardware * automatically or the bits in MVPP22_GMAC_CTRL_4_REG * manually controls the GMAC pause modes. 
*/ if (permit_pause_to_mac) val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG; /* Configure advertisement bits */ mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN; if (phylink_test(advertising, Pause)) val |= MVPP2_GMAC_FC_ADV_EN; if (phylink_test(advertising, Asym_Pause)) val |= MVPP2_GMAC_FC_ADV_ASM_EN; } } else { val = 0; } old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); an = (an & ~mask) | val; changed = an ^ old_an; if (changed) writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); /* We are only interested in the advertisement bits changing */ return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN); } static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs) { struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN, port->base + MVPP2_GMAC_AUTONEG_CONFIG); writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN, port->base + MVPP2_GMAC_AUTONEG_CONFIG); } static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = { .pcs_validate = mvpp2_gmac_pcs_validate, .pcs_get_state = mvpp2_gmac_pcs_get_state, .pcs_config = mvpp2_gmac_pcs_config, .pcs_an_restart = mvpp2_gmac_pcs_an_restart, }; static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, const struct phylink_link_state *state) { u32 val; mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, MVPP22_XLG_CTRL0_MAC_RESET_DIS, MVPP22_XLG_CTRL0_MAC_RESET_DIS); mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG, MVPP22_XLG_CTRL4_MACMODSELECT_GMAC | MVPP22_XLG_CTRL4_EN_IDLE_CHECK | MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC, MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC); /* Wait for reset to deassert */ do { val = readl(port->base + MVPP22_XLG_CTRL0_REG); } while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS)); } static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, const struct phylink_link_state *state) { u32 old_ctrl0, ctrl0; u32 old_ctrl2, ctrl2; u32 old_ctrl4, ctrl4; old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG); old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG); old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG); ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK; ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_FLOW_CTRL_MASK); /* Configure port type */ if (phy_interface_mode_is_8023z(state->interface)) { ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK; ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL | MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK; ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL | MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; } else if (phy_interface_mode_is_rgmii(state->interface)) { ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL; ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; } /* Configure negotiation style */ if (!phylink_autoneg_inband(mode)) { /* Phy or fixed speed - no in-band AN, nothing to do, leave the * configured speed, duplex and flow control as-is. */ } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { /* SGMII in-band mode receives the speed and duplex from * the PHY. Flow control information is not received. 
*/ } else if (phy_interface_mode_is_8023z(state->interface)) { /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can * they negotiate duplex: they are always operating with a fixed * speed of 1000/2500Mbps in full duplex, so force 1000/2500 * speed and full duplex here. */ ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK; } if (old_ctrl0 != ctrl0) writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG); if (old_ctrl2 != ctrl2) writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); if (old_ctrl4 != ctrl4) writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); } static struct phylink_pcs *mvpp2_select_pcs(struct phylink_config *config, phy_interface_t interface) { struct mvpp2_port *port = mvpp2_phylink_to_port(config); /* Select the appropriate PCS operations depending on the * configured interface mode. We will only switch to a mode * that the validate() checks have already passed. */ if (mvpp2_is_xlg(interface)) return &port->pcs_xlg; else return &port->pcs_gmac; } static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { struct mvpp2_port *port = mvpp2_phylink_to_port(config); /* Check for invalid configuration */ if (mvpp2_is_xlg(interface) && port->gop_id != 0) { netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name); return -EINVAL; } if (port->phy_interface != interface || phylink_autoneg_inband(mode)) { /* Force the link down when changing the interface or if in * in-band mode to ensure we do not change the configuration * while the hardware is indicating link is up. We force both * XLG and GMAC down to ensure that they're both in a known * state. */ mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, MVPP2_GMAC_FORCE_LINK_PASS | MVPP2_GMAC_FORCE_LINK_DOWN, MVPP2_GMAC_FORCE_LINK_DOWN); if (mvpp2_port_supports_xlg(port)) mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, MVPP22_XLG_CTRL0_FORCE_LINK_PASS | MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, MVPP22_XLG_CTRL0_FORCE_LINK_DOWN); } /* Make sure the port is disabled when reconfiguring the mode */ mvpp2_port_disable(port); if (port->phy_interface != interface) { /* Place GMAC into reset */ mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG, MVPP2_GMAC_PORT_RESET_MASK, MVPP2_GMAC_PORT_RESET_MASK); if (port->priv->hw_version >= MVPP22) { mvpp22_gop_mask_irq(port); phy_power_off(port->comphy); /* Reconfigure the serdes lanes */ mvpp22_mode_reconfigure(port, interface); } } return 0; } static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { struct mvpp2_port *port = mvpp2_phylink_to_port(config); /* mac (re)configuration */ if (mvpp2_is_xlg(state->interface)) mvpp2_xlg_config(port, mode, state); else if (phy_interface_mode_is_rgmii(state->interface) || phy_interface_mode_is_8023z(state->interface) || state->interface == PHY_INTERFACE_MODE_SGMII) mvpp2_gmac_config(port, mode, state); if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) mvpp2_port_loopback_set(port, state); } static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { struct mvpp2_port *port = mvpp2_phylink_to_port(config); if (port->priv->hw_version >= MVPP22 && port->phy_interface != interface) { port->phy_interface = interface; /* Unmask interrupts */ mvpp22_gop_unmask_irq(port); } if (!mvpp2_is_xlg(interface)) { /* Release GMAC reset and wait */ mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG, MVPP2_GMAC_PORT_RESET_MASK, 0); while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & MVPP2_GMAC_PORT_RESET_MASK) continue; } 
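/* Descriptive comment (editorial addition, not from the original source):
 * at this point the GMAC reset has been released for non-XLG modes, so the
 * port can be re-enabled before the forced-link overrides are handled below.
 */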
mvpp2_port_enable(port); /* Allow the link to come up if in in-band mode, otherwise the * link is forced via mac_link_down()/mac_link_up() */ if (phylink_autoneg_inband(mode)) { if (mvpp2_is_xlg(interface)) mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, MVPP22_XLG_CTRL0_FORCE_LINK_PASS | MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0); else mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, MVPP2_GMAC_FORCE_LINK_PASS | MVPP2_GMAC_FORCE_LINK_DOWN, 0); } return 0; } static void mvpp2_mac_link_up(struct phylink_config *config, struct phy_device *phy, unsigned int mode, phy_interface_t interface, int speed, int duplex, bool tx_pause, bool rx_pause) { struct mvpp2_port *port = mvpp2_phylink_to_port(config); u32 val; int i; if (mvpp2_is_xlg(interface)) { if (!phylink_autoneg_inband(mode)) { val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS; if (tx_pause) val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; if (rx_pause) val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, MVPP22_XLG_CTRL0_FORCE_LINK_DOWN | MVPP22_XLG_CTRL0_FORCE_LINK_PASS | MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN | MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val); } } else { if (!phylink_autoneg_inband(mode)) { val = MVPP2_GMAC_FORCE_LINK_PASS; if (speed == SPEED_1000 || speed == SPEED_2500) val |= MVPP2_GMAC_CONFIG_GMII_SPEED; else if (speed == SPEED_100) val |= MVPP2_GMAC_CONFIG_MII_SPEED; if (duplex == DUPLEX_FULL) val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, MVPP2_GMAC_FORCE_LINK_DOWN | MVPP2_GMAC_FORCE_LINK_PASS | MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | MVPP2_GMAC_CONFIG_FULL_DUPLEX, val); } /* We can always update the flow control enable bits; * these will only be effective if flow control AN * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled. 
*/ val = 0; if (tx_pause) val |= MVPP22_CTRL4_TX_FC_EN; if (rx_pause) val |= MVPP22_CTRL4_RX_FC_EN; mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG, MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN, val); } if (port->priv->global_tx_fc) { port->tx_fc = tx_pause; if (tx_pause) mvpp2_rxq_enable_fc(port); else mvpp2_rxq_disable_fc(port); if (port->priv->percpu_pools) { for (i = 0; i < port->nrxqs; i++) mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i], tx_pause); } else { mvpp2_bm_pool_update_fc(port, port->pool_long, tx_pause); mvpp2_bm_pool_update_fc(port, port->pool_short, tx_pause); } if (port->priv->hw_version == MVPP23) mvpp23_rx_fifo_fc_en(port->priv, port->id, tx_pause); } mvpp2_port_enable(port); mvpp2_egress_enable(port); mvpp2_ingress_enable(port); netif_tx_wake_all_queues(port->dev); } static void mvpp2_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { struct mvpp2_port *port = mvpp2_phylink_to_port(config); u32 val; if (!phylink_autoneg_inband(mode)) { if (mvpp2_is_xlg(interface)) { val = readl(port->base + MVPP22_XLG_CTRL0_REG); val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; writel(val, port->base + MVPP22_XLG_CTRL0_REG); } else { val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); val &= ~MVPP2_GMAC_FORCE_LINK_PASS; val |= MVPP2_GMAC_FORCE_LINK_DOWN; writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); } } netif_tx_stop_all_queues(port->dev); mvpp2_egress_disable(port); mvpp2_ingress_disable(port); mvpp2_port_disable(port); } static const struct phylink_mac_ops mvpp2_phylink_ops = { .mac_select_pcs = mvpp2_select_pcs, .mac_prepare = mvpp2_mac_prepare, .mac_config = mvpp2_mac_config, .mac_finish = mvpp2_mac_finish, .mac_link_up = mvpp2_mac_link_up, .mac_link_down = mvpp2_mac_link_down, }; /* Work-around for ACPI */ static void mvpp2_acpi_start(struct mvpp2_port *port) { /* Phylink isn't used as of now for ACPI, so the MAC has to be * configured manually when the interface is started. This will * be removed as soon as the phylink ACPI support lands in. */ struct phylink_link_state state = { .interface = port->phy_interface, }; struct phylink_pcs *pcs; pcs = mvpp2_select_pcs(&port->phylink_config, port->phy_interface); mvpp2_mac_prepare(&port->phylink_config, MLO_AN_INBAND, port->phy_interface); mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state); pcs->ops->pcs_config(pcs, PHYLINK_PCS_NEG_INBAND_ENABLED, port->phy_interface, state.advertising, false); mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND, port->phy_interface); mvpp2_mac_link_up(&port->phylink_config, NULL, MLO_AN_INBAND, port->phy_interface, SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false); } /* In order to ensure backward compatibility for ACPI, check if the port * firmware node comprises the necessary description allowing to use phylink. 
*/ static bool mvpp2_use_acpi_compat_mode(struct fwnode_handle *port_fwnode) { if (!is_acpi_node(port_fwnode)) return false; return (!fwnode_property_present(port_fwnode, "phy-handle") && !fwnode_property_present(port_fwnode, "managed") && !fwnode_get_named_child_node(port_fwnode, "fixed-link")); } /* Ports initialization */ static int mvpp2_port_probe(struct platform_device *pdev, struct fwnode_handle *port_fwnode, struct mvpp2 *priv) { struct phy *comphy = NULL; struct mvpp2_port *port; struct mvpp2_port_pcpu *port_pcpu; struct device_node *port_node = to_of_node(port_fwnode); netdev_features_t features; struct net_device *dev; struct phylink *phylink; char *mac_from = ""; unsigned int ntxqs, nrxqs, thread; unsigned long flags = 0; bool has_tx_irqs; u32 id; int phy_mode; int err, i; has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags); if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) { dev_err(&pdev->dev, "not enough IRQs to support multi queue mode\n"); return -EINVAL; } ntxqs = MVPP2_MAX_TXQ; nrxqs = mvpp2_get_nrxqs(priv); dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs); if (!dev) return -ENOMEM; phy_mode = fwnode_get_phy_mode(port_fwnode); if (phy_mode < 0) { dev_err(&pdev->dev, "incorrect phy mode\n"); err = phy_mode; goto err_free_netdev; } /* * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT. * Existing usage of 10GBASE-KR is not correct; no backplane * negotiation is done, and this driver does not actually support * 10GBASE-KR. */ if (phy_mode == PHY_INTERFACE_MODE_10GKR) phy_mode = PHY_INTERFACE_MODE_10GBASER; if (port_node) { comphy = devm_of_phy_get(&pdev->dev, port_node, NULL); if (IS_ERR(comphy)) { if (PTR_ERR(comphy) == -EPROBE_DEFER) { err = -EPROBE_DEFER; goto err_free_netdev; } comphy = NULL; } } if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) { err = -EINVAL; dev_err(&pdev->dev, "missing port-id value\n"); goto err_free_netdev; } dev->tx_queue_len = MVPP2_MAX_TXD_MAX; dev->watchdog_timeo = 5 * HZ; dev->netdev_ops = &mvpp2_netdev_ops; dev->ethtool_ops = &mvpp2_eth_tool_ops; port = netdev_priv(dev); port->dev = dev; port->fwnode = port_fwnode; port->ntxqs = ntxqs; port->nrxqs = nrxqs; port->priv = priv; port->has_tx_irqs = has_tx_irqs; port->flags = flags; err = mvpp2_queue_vectors_init(port, port_node); if (err) goto err_free_netdev; if (port_node) port->port_irq = of_irq_get_byname(port_node, "link"); else port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1); if (port->port_irq == -EPROBE_DEFER) { err = -EPROBE_DEFER; goto err_deinit_qvecs; } if (port->port_irq <= 0) /* the link irq is optional */ port->port_irq = 0; if (fwnode_property_read_bool(port_fwnode, "marvell,loopback")) port->flags |= MVPP2_F_LOOPBACK; port->id = id; if (priv->hw_version == MVPP21) port->first_rxq = port->id * port->nrxqs; else port->first_rxq = port->id * priv->max_port_rxqs; port->of_node = port_node; port->phy_interface = phy_mode; port->comphy = comphy; if (priv->hw_version == MVPP21) { port->base = devm_platform_ioremap_resource(pdev, 2 + id); if (IS_ERR(port->base)) { err = PTR_ERR(port->base); goto err_free_irq; } port->stats_base = port->priv->lms_base + MVPP21_MIB_COUNTERS_OFFSET + port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ; } else { if (fwnode_property_read_u32(port_fwnode, "gop-port-id", &port->gop_id)) { err = -EINVAL; dev_err(&pdev->dev, "missing gop-port-id value\n"); goto err_deinit_qvecs; } port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id); port->stats_base = port->priv->iface_base + 
MVPP22_MIB_COUNTERS_OFFSET + port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ; /* We may want a property to describe whether we should use * MAC hardware timestamping. */ if (priv->tai) port->hwtstamp = true; } /* Alloc per-cpu and ethtool stats */ port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats); if (!port->stats) { err = -ENOMEM; goto err_free_irq; } port->ethtool_stats = devm_kcalloc(&pdev->dev, MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs), sizeof(u64), GFP_KERNEL); if (!port->ethtool_stats) { err = -ENOMEM; goto err_free_stats; } mutex_init(&port->gather_stats_lock); INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics); err = mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from); if (err < 0) goto err_free_stats; port->tx_ring_size = MVPP2_MAX_TXD_DFLT; port->rx_ring_size = MVPP2_MAX_RXD_DFLT; SET_NETDEV_DEV(dev, &pdev->dev); err = mvpp2_port_init(port); if (err < 0) { dev_err(&pdev->dev, "failed to init port %d\n", id); goto err_free_stats; } mvpp2_port_periodic_xon_disable(port); mvpp2_mac_reset_assert(port); mvpp22_pcs_reset_assert(port); port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); if (!port->pcpu) { err = -ENOMEM; goto err_free_txq_pcpu; } if (!port->has_tx_irqs) { for (thread = 0; thread < priv->nthreads; thread++) { port_pcpu = per_cpu_ptr(port->pcpu, thread); hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED_SOFT); port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; port_pcpu->timer_scheduled = false; port_pcpu->dev = dev; } } features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO; dev->features = features | NETIF_F_RXCSUM; dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_CTAG_FILTER; if (mvpp22_rss_is_supported(port)) { dev->hw_features |= NETIF_F_RXHASH; dev->features |= NETIF_F_NTUPLE; } if (!port->priv->percpu_pools) mvpp2_set_hw_csum(port, port->pool_long->id); else if (port->ntxqs >= num_possible_cpus() * 2) dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_NDO_XMIT; dev->vlan_features |= features; netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS); dev->priv_flags |= IFF_UNICAST_FLT; /* MTU range: 68 - 9704 */ dev->min_mtu = ETH_MIN_MTU; /* 9704 == 9728 - 20 and rounding to 8 */ dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; dev->dev.of_node = port_node; port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops; port->pcs_gmac.neg_mode = true; port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops; port->pcs_xlg.neg_mode = true; if (!mvpp2_use_acpi_compat_mode(port_fwnode)) { port->phylink_config.dev = &dev->dev; port->phylink_config.type = PHYLINK_NETDEV; port->phylink_config.mac_capabilities = MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10; if (port->priv->global_tx_fc) port->phylink_config.mac_capabilities |= MAC_SYM_PAUSE | MAC_ASYM_PAUSE; if (mvpp2_port_supports_xlg(port)) { /* If a COMPHY is present, we can support any of * the serdes modes and switch between them. 
*/ if (comphy) { __set_bit(PHY_INTERFACE_MODE_5GBASER, port->phylink_config.supported_interfaces); __set_bit(PHY_INTERFACE_MODE_10GBASER, port->phylink_config.supported_interfaces); __set_bit(PHY_INTERFACE_MODE_XAUI, port->phylink_config.supported_interfaces); } else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) { __set_bit(PHY_INTERFACE_MODE_5GBASER, port->phylink_config.supported_interfaces); } else if (phy_mode == PHY_INTERFACE_MODE_10GBASER) { __set_bit(PHY_INTERFACE_MODE_10GBASER, port->phylink_config.supported_interfaces); } else if (phy_mode == PHY_INTERFACE_MODE_XAUI) { __set_bit(PHY_INTERFACE_MODE_XAUI, port->phylink_config.supported_interfaces); } if (comphy) port->phylink_config.mac_capabilities |= MAC_10000FD | MAC_5000FD; else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) port->phylink_config.mac_capabilities |= MAC_5000FD; else port->phylink_config.mac_capabilities |= MAC_10000FD; } if (mvpp2_port_supports_rgmii(port)) phy_interface_set_rgmii(port->phylink_config.supported_interfaces); if (comphy) { /* If a COMPHY is present, we can support any of the * serdes modes and switch between them. */ __set_bit(PHY_INTERFACE_MODE_SGMII, port->phylink_config.supported_interfaces); __set_bit(PHY_INTERFACE_MODE_1000BASEX, port->phylink_config.supported_interfaces); __set_bit(PHY_INTERFACE_MODE_2500BASEX, port->phylink_config.supported_interfaces); } else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) { /* No COMPHY, with only 2500BASE-X mode supported */ __set_bit(PHY_INTERFACE_MODE_2500BASEX, port->phylink_config.supported_interfaces); } else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX || phy_mode == PHY_INTERFACE_MODE_SGMII) { /* No COMPHY, we can switch between 1000BASE-X and SGMII */ __set_bit(PHY_INTERFACE_MODE_1000BASEX, port->phylink_config.supported_interfaces); __set_bit(PHY_INTERFACE_MODE_SGMII, port->phylink_config.supported_interfaces); } phylink = phylink_create(&port->phylink_config, port_fwnode, phy_mode, &mvpp2_phylink_ops); if (IS_ERR(phylink)) { err = PTR_ERR(phylink); goto err_free_port_pcpu; } port->phylink = phylink; } else { dev_warn(&pdev->dev, "Use link irqs for port#%d. FW update required\n", port->id); port->phylink = NULL; } /* Cycle the comphy to power it down, saving 270mW per port - * don't worry about an error powering it up. When the comphy * driver does this, we can remove this code. 
*/ if (port->comphy) { err = mvpp22_comphy_init(port, port->phy_interface); if (err == 0) phy_power_off(port->comphy); } err = register_netdev(dev); if (err < 0) { dev_err(&pdev->dev, "failed to register netdev\n"); goto err_phylink; } netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); priv->port_list[priv->port_count++] = port; return 0; err_phylink: if (port->phylink) phylink_destroy(port->phylink); err_free_port_pcpu: free_percpu(port->pcpu); err_free_txq_pcpu: for (i = 0; i < port->ntxqs; i++) free_percpu(port->txqs[i]->pcpu); err_free_stats: free_percpu(port->stats); err_free_irq: if (port->port_irq) irq_dispose_mapping(port->port_irq); err_deinit_qvecs: mvpp2_queue_vectors_deinit(port); err_free_netdev: free_netdev(dev); return err; } /* Ports removal routine */ static void mvpp2_port_remove(struct mvpp2_port *port) { int i; unregister_netdev(port->dev); if (port->phylink) phylink_destroy(port->phylink); free_percpu(port->pcpu); free_percpu(port->stats); for (i = 0; i < port->ntxqs; i++) free_percpu(port->txqs[i]->pcpu); mvpp2_queue_vectors_deinit(port); if (port->port_irq) irq_dispose_mapping(port->port_irq); free_netdev(port->dev); } /* Initialize decoding windows */ static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram, struct mvpp2 *priv) { u32 win_enable; int i; for (i = 0; i < 6; i++) { mvpp2_write(priv, MVPP2_WIN_BASE(i), 0); mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0); if (i < 4) mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0); } win_enable = 0; for (i = 0; i < dram->num_cs; i++) { const struct mbus_dram_window *cs = dram->cs + i; mvpp2_write(priv, MVPP2_WIN_BASE(i), (cs->base & 0xffff0000) | (cs->mbus_attr << 8) | dram->mbus_dram_target_id); mvpp2_write(priv, MVPP2_WIN_SIZE(i), (cs->size - 1) & 0xffff0000); win_enable |= (1 << i); } mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable); } /* Initialize Rx FIFO's */ static void mvpp2_rx_fifo_init(struct mvpp2 *priv) { int port; for (port = 0; port < MVPP2_MAX_PORTS; port++) { mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB); } mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, MVPP2_RX_FIFO_PORT_MIN_PKT); mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); } static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size) { int attr_size = MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size); mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), data_size); mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), attr_size); } /* Initialize TX FIFO's: the total FIFO size is 48kB on PPv2.2 and PPv2.3. * 4kB fixed space must be assigned for the loopback port. * Redistribute remaining avialable 44kB space among all active ports. * Guarantee minimum 32kB for 10G port and 8kB for port 1, capable of 2.5G * SGMII link. */ static void mvpp22_rx_fifo_init(struct mvpp2 *priv) { int remaining_ports_count; unsigned long port_map; int size_remainder; int port, size; /* The loopback requires fixed 4kB of the FIFO space assignment. */ mvpp22_rx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX, MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX); /* Set RX FIFO size to 0 for inactive ports. */ for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) mvpp22_rx_fifo_set_hw(priv, port, 0); /* Assign remaining RX FIFO space among all active ports. 
*/ size_remainder = MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB; remaining_ports_count = hweight_long(port_map); for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) { if (remaining_ports_count == 1) size = size_remainder; else if (port == 0) size = max(size_remainder / remaining_ports_count, MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB); else if (port == 1) size = max(size_remainder / remaining_ports_count, MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB); else size = size_remainder / remaining_ports_count; size_remainder -= size; remaining_ports_count--; mvpp22_rx_fifo_set_hw(priv, port, size); } mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, MVPP2_RX_FIFO_PORT_MIN_PKT); mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); } /* Configure Rx FIFO Flow control thresholds */ static void mvpp23_rx_fifo_fc_set_tresh(struct mvpp2 *priv) { int port, val; /* Port 0: maximum speed -10Gb/s port * required by spec RX FIFO threshold 9KB * Port 1: maximum speed -5Gb/s port * required by spec RX FIFO threshold 4KB * Port 2: maximum speed -1Gb/s port * required by spec RX FIFO threshold 2KB */ /* Without loopback port */ for (port = 0; port < (MVPP2_MAX_PORTS - 1); port++) { if (port == 0) { val = (MVPP23_PORT0_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT) << MVPP2_RX_FC_TRSH_OFFS; val &= MVPP2_RX_FC_TRSH_MASK; mvpp2_write(priv, MVPP2_RX_FC_REG(port), val); } else if (port == 1) { val = (MVPP23_PORT1_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT) << MVPP2_RX_FC_TRSH_OFFS; val &= MVPP2_RX_FC_TRSH_MASK; mvpp2_write(priv, MVPP2_RX_FC_REG(port), val); } else { val = (MVPP23_PORT2_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT) << MVPP2_RX_FC_TRSH_OFFS; val &= MVPP2_RX_FC_TRSH_MASK; mvpp2_write(priv, MVPP2_RX_FC_REG(port), val); } } } /* Configure Rx FIFO Flow control thresholds */ void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en) { int val; val = mvpp2_read(priv, MVPP2_RX_FC_REG(port)); if (en) val |= MVPP2_RX_FC_EN; else val &= ~MVPP2_RX_FC_EN; mvpp2_write(priv, MVPP2_RX_FC_REG(port), val); } static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size) { int threshold = MVPP2_TX_FIFO_THRESHOLD(size); mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size); mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), threshold); } /* Initialize TX FIFO's: the total FIFO size is 19kB on PPv2.2 and PPv2.3. * 1kB fixed space must be assigned for the loopback port. * Redistribute remaining avialable 18kB space among all active ports. * The 10G interface should use 10kB (which is maximum possible size * per single port). */ static void mvpp22_tx_fifo_init(struct mvpp2 *priv) { int remaining_ports_count; unsigned long port_map; int size_remainder; int port, size; /* The loopback requires fixed 1kB of the FIFO space assignment. */ mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX, MVPP22_TX_FIFO_DATA_SIZE_1KB); port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX); /* Set TX FIFO size to 0 for inactive ports. */ for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) mvpp22_tx_fifo_set_hw(priv, port, 0); /* Assign remaining TX FIFO space among all active ports. 
*/ size_remainder = MVPP22_TX_FIFO_DATA_SIZE_18KB; remaining_ports_count = hweight_long(port_map); for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) { if (remaining_ports_count == 1) size = min(size_remainder, MVPP22_TX_FIFO_DATA_SIZE_10KB); else if (port == 0) size = MVPP22_TX_FIFO_DATA_SIZE_10KB; else size = size_remainder / remaining_ports_count; size_remainder -= size; remaining_ports_count--; mvpp22_tx_fifo_set_hw(priv, port, size); } } static void mvpp2_axi_init(struct mvpp2 *priv) { u32 val, rdval, wrval; mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0); /* AXI Bridge Configuration */ rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE << MVPP22_AXI_ATTR_CACHE_OFFS; rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_ATTR_DOMAIN_OFFS; wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE << MVPP22_AXI_ATTR_CACHE_OFFS; wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_ATTR_DOMAIN_OFFS; /* BM */ mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval); mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval); /* Descriptors */ mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval); mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval); mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval); mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval); /* Buffer Data */ mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval); mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval); val = MVPP22_AXI_CODE_CACHE_NON_CACHE << MVPP22_AXI_CODE_CACHE_OFFS; val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM << MVPP22_AXI_CODE_DOMAIN_OFFS; mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val); mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val); val = MVPP22_AXI_CODE_CACHE_RD_CACHE << MVPP22_AXI_CODE_CACHE_OFFS; val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_CODE_DOMAIN_OFFS; mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val); val = MVPP22_AXI_CODE_CACHE_WR_CACHE << MVPP22_AXI_CODE_CACHE_OFFS; val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_CODE_DOMAIN_OFFS; mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val); } /* Initialize network controller common part HW */ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) { const struct mbus_dram_target_info *dram_target_info; int err, i; u32 val; /* MBUS windows configuration */ dram_target_info = mv_mbus_dram_info(); if (dram_target_info) mvpp2_conf_mbus_windows(dram_target_info, priv); if (priv->hw_version >= MVPP22) mvpp2_axi_init(priv); /* Disable HW PHY polling */ if (priv->hw_version == MVPP21) { val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG); val |= MVPP2_PHY_AN_STOP_SMI0_MASK; writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG); } else { val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG); val &= ~MVPP22_SMI_POLLING_EN; writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG); } /* Allocate and initialize aggregated TXQs */ priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS, sizeof(*priv->aggr_txqs), GFP_KERNEL); if (!priv->aggr_txqs) return -ENOMEM; for (i = 0; i < MVPP2_MAX_THREADS; i++) { priv->aggr_txqs[i].id = i; priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv); if (err < 0) return err; } /* Fifo Init */ if (priv->hw_version == MVPP21) { mvpp2_rx_fifo_init(priv); } else { mvpp22_rx_fifo_init(priv); mvpp22_tx_fifo_init(priv); if (priv->hw_version == MVPP23) mvpp23_rx_fifo_fc_set_tresh(priv); } if (priv->hw_version == MVPP21) writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); /* Allow cache snoop 
when transmiting packets */ mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1); /* Buffer Manager initialization */ err = mvpp2_bm_init(&pdev->dev, priv); if (err < 0) return err; /* Parser default initialization */ err = mvpp2_prs_default_init(pdev, priv); if (err < 0) return err; /* Classifier default initialization */ mvpp2_cls_init(priv); return 0; } static int mvpp2_get_sram(struct platform_device *pdev, struct mvpp2 *priv) { struct resource *res; void __iomem *base; res = platform_get_resource(pdev, IORESOURCE_MEM, 2); if (!res) { if (has_acpi_companion(&pdev->dev)) dev_warn(&pdev->dev, "ACPI is too old, Flow control not supported\n"); else dev_warn(&pdev->dev, "DT is too old, Flow control not supported\n"); return 0; } base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); priv->cm3_base = base; return 0; } static int mvpp2_probe(struct platform_device *pdev) { struct fwnode_handle *fwnode = pdev->dev.fwnode; struct fwnode_handle *port_fwnode; struct mvpp2 *priv; struct resource *res; void __iomem *base; int i, shared; int err; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->hw_version = (unsigned long)device_get_match_data(&pdev->dev); /* multi queue mode isn't supported on PPV2.1, fallback to single * mode */ if (priv->hw_version == MVPP21) queue_mode = MVPP2_QDIST_SINGLE_MODE; base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); if (priv->hw_version == MVPP21) { priv->lms_base = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(priv->lms_base)) return PTR_ERR(priv->lms_base); } else { res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res) { dev_err(&pdev->dev, "Invalid resource\n"); return -EINVAL; } if (has_acpi_companion(&pdev->dev)) { /* In case the MDIO memory region is declared in * the ACPI, it can already appear as 'in-use' * in the OS. Because it is overlapped by second * region of the network controller, make * sure it is released, before requesting it again. * The care is taken by mvpp2 driver to avoid * concurrent access to this memory region. */ release_resource(res); } priv->iface_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->iface_base)) return PTR_ERR(priv->iface_base); /* Map CM3 SRAM */ err = mvpp2_get_sram(pdev, priv); if (err) dev_warn(&pdev->dev, "Fail to alloc CM3 SRAM\n"); /* Enable global Flow Control only if handler to SRAM not NULL */ if (priv->cm3_base) priv->global_tx_fc = true; } if (priv->hw_version >= MVPP22 && dev_of_node(&pdev->dev)) { priv->sysctrl_base = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "marvell,system-controller"); if (IS_ERR(priv->sysctrl_base)) /* The system controller regmap is optional for dt * compatibility reasons. When not provided, the * configuration of the GoP relies on the * firmware/bootloader. */ priv->sysctrl_base = NULL; } if (priv->hw_version >= MVPP22 && mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS) priv->percpu_pools = 1; mvpp2_setup_bm_pool(); priv->nthreads = min_t(unsigned int, num_present_cpus(), MVPP2_MAX_THREADS); shared = num_present_cpus() - priv->nthreads; if (shared > 0) bitmap_set(&priv->lock_map, 0, min_t(int, shared, MVPP2_MAX_THREADS)); for (i = 0; i < MVPP2_MAX_THREADS; i++) { u32 addr_space_sz; addr_space_sz = (priv->hw_version == MVPP21 ? 
MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ); priv->swth_base[i] = base + i * addr_space_sz; } if (priv->hw_version == MVPP21) priv->max_port_rxqs = 8; else priv->max_port_rxqs = 32; if (dev_of_node(&pdev->dev)) { priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk"); if (IS_ERR(priv->pp_clk)) return PTR_ERR(priv->pp_clk); err = clk_prepare_enable(priv->pp_clk); if (err < 0) return err; priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk"); if (IS_ERR(priv->gop_clk)) { err = PTR_ERR(priv->gop_clk); goto err_pp_clk; } err = clk_prepare_enable(priv->gop_clk); if (err < 0) goto err_pp_clk; if (priv->hw_version >= MVPP22) { priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk"); if (IS_ERR(priv->mg_clk)) { err = PTR_ERR(priv->mg_clk); goto err_gop_clk; } err = clk_prepare_enable(priv->mg_clk); if (err < 0) goto err_gop_clk; priv->mg_core_clk = devm_clk_get_optional(&pdev->dev, "mg_core_clk"); if (IS_ERR(priv->mg_core_clk)) { err = PTR_ERR(priv->mg_core_clk); goto err_mg_clk; } err = clk_prepare_enable(priv->mg_core_clk); if (err < 0) goto err_mg_clk; } priv->axi_clk = devm_clk_get_optional(&pdev->dev, "axi_clk"); if (IS_ERR(priv->axi_clk)) { err = PTR_ERR(priv->axi_clk); goto err_mg_core_clk; } err = clk_prepare_enable(priv->axi_clk); if (err < 0) goto err_mg_core_clk; /* Get system's tclk rate */ priv->tclk = clk_get_rate(priv->pp_clk); } else { err = device_property_read_u32(&pdev->dev, "clock-frequency", &priv->tclk); if (err) { dev_err(&pdev->dev, "missing clock-frequency value\n"); return err; } } if (priv->hw_version >= MVPP22) { err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK); if (err) goto err_axi_clk; /* Sadly, the BM pools all share the same register to * store the high 32 bits of their address. So they * must all have the same high 32 bits, which forces * us to restrict coherent memory to DMA_BIT_MASK(32). */ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) goto err_axi_clk; } /* Map DTS-active ports. Should be done before FIFO mvpp2_init */ fwnode_for_each_available_child_node(fwnode, port_fwnode) { if (!fwnode_property_read_u32(port_fwnode, "port-id", &i)) priv->port_map |= BIT(i); } if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23) priv->hw_version = MVPP23; /* Init mss lock */ spin_lock_init(&priv->mss_spinlock); /* Initialize network controller */ err = mvpp2_init(pdev, priv); if (err < 0) { dev_err(&pdev->dev, "failed to initialize controller\n"); goto err_axi_clk; } err = mvpp22_tai_probe(&pdev->dev, priv); if (err < 0) goto err_axi_clk; /* Initialize ports */ fwnode_for_each_available_child_node(fwnode, port_fwnode) { err = mvpp2_port_probe(pdev, port_fwnode, priv); if (err < 0) goto err_port_probe; } if (priv->port_count == 0) { dev_err(&pdev->dev, "no ports enabled\n"); err = -ENODEV; goto err_axi_clk; } /* Statistics must be gathered regularly because some of them (like * packets counters) are 32-bit registers and could overflow quite * quickly. For instance, a 10Gb link used at full bandwidth with the * smallest packets (64B) will overflow a 32-bit counter in less than * 30 seconds. Then, use a workqueue to fill 64-bit counters. */ snprintf(priv->queue_name, sizeof(priv->queue_name), "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev), priv->port_count > 1 ? 
"+" : ""); priv->stats_queue = create_singlethread_workqueue(priv->queue_name); if (!priv->stats_queue) { err = -ENOMEM; goto err_port_probe; } if (priv->global_tx_fc && priv->hw_version >= MVPP22) { err = mvpp2_enable_global_fc(priv); if (err) dev_warn(&pdev->dev, "Minimum of CM3 firmware 18.09 and chip revision B0 required for flow control\n"); } mvpp2_dbgfs_init(priv, pdev->name); platform_set_drvdata(pdev, priv); return 0; err_port_probe: fwnode_handle_put(port_fwnode); i = 0; fwnode_for_each_available_child_node(fwnode, port_fwnode) { if (priv->port_list[i]) mvpp2_port_remove(priv->port_list[i]); i++; } err_axi_clk: clk_disable_unprepare(priv->axi_clk); err_mg_core_clk: clk_disable_unprepare(priv->mg_core_clk); err_mg_clk: clk_disable_unprepare(priv->mg_clk); err_gop_clk: clk_disable_unprepare(priv->gop_clk); err_pp_clk: clk_disable_unprepare(priv->pp_clk); return err; } static int mvpp2_remove(struct platform_device *pdev) { struct mvpp2 *priv = platform_get_drvdata(pdev); struct fwnode_handle *fwnode = pdev->dev.fwnode; int i = 0, poolnum = MVPP2_BM_POOLS_NUM; struct fwnode_handle *port_fwnode; mvpp2_dbgfs_cleanup(priv); fwnode_for_each_available_child_node(fwnode, port_fwnode) { if (priv->port_list[i]) { mutex_destroy(&priv->port_list[i]->gather_stats_lock); mvpp2_port_remove(priv->port_list[i]); } i++; } destroy_workqueue(priv->stats_queue); if (priv->percpu_pools) poolnum = mvpp2_get_nrxqs(priv) * 2; for (i = 0; i < poolnum; i++) { struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool); } for (i = 0; i < MVPP2_MAX_THREADS; i++) { struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i]; dma_free_coherent(&pdev->dev, MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, aggr_txq->descs, aggr_txq->descs_dma); } if (is_acpi_node(port_fwnode)) return 0; clk_disable_unprepare(priv->axi_clk); clk_disable_unprepare(priv->mg_core_clk); clk_disable_unprepare(priv->mg_clk); clk_disable_unprepare(priv->pp_clk); clk_disable_unprepare(priv->gop_clk); return 0; } static const struct of_device_id mvpp2_match[] = { { .compatible = "marvell,armada-375-pp2", .data = (void *)MVPP21, }, { .compatible = "marvell,armada-7k-pp22", .data = (void *)MVPP22, }, { } }; MODULE_DEVICE_TABLE(of, mvpp2_match); #ifdef CONFIG_ACPI static const struct acpi_device_id mvpp2_acpi_match[] = { { "MRVL0110", MVPP22 }, { }, }; MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match); #endif static struct platform_driver mvpp2_driver = { .probe = mvpp2_probe, .remove = mvpp2_remove, .driver = { .name = MVPP2_DRIVER_NAME, .of_match_table = mvpp2_match, .acpi_match_table = ACPI_PTR(mvpp2_acpi_match), }, }; static int __init mvpp2_driver_init(void) { return platform_driver_register(&mvpp2_driver); } module_init(mvpp2_driver_init); static void __exit mvpp2_driver_exit(void) { platform_driver_unregister(&mvpp2_driver); mvpp2_dbgfs_exit(); } module_exit(mvpp2_driver_exit); MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com"); MODULE_AUTHOR("Marcin Wojtas <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
// SPDX-License-Identifier: GPL-2.0 /* * Driver for Marvell PPv2 network controller for Armada 375 SoC. * * Copyright (C) 2018 Marvell */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/debugfs.h> #include "mvpp2.h" #include "mvpp2_prs.h" #include "mvpp2_cls.h" struct mvpp2_dbgfs_prs_entry { int tid; struct mvpp2 *priv; }; struct mvpp2_dbgfs_c2_entry { int id; struct mvpp2 *priv; }; struct mvpp2_dbgfs_flow_entry { int flow; struct mvpp2 *priv; }; struct mvpp2_dbgfs_flow_tbl_entry { int id; struct mvpp2 *priv; }; struct mvpp2_dbgfs_port_flow_entry { struct mvpp2_port *port; struct mvpp2_dbgfs_flow_entry *dbg_fe; }; struct mvpp2_dbgfs_entries { /* Entries for Header Parser debug info */ struct mvpp2_dbgfs_prs_entry prs_entries[MVPP2_PRS_TCAM_SRAM_SIZE]; /* Entries for Classifier C2 engine debug info */ struct mvpp2_dbgfs_c2_entry c2_entries[MVPP22_CLS_C2_N_ENTRIES]; /* Entries for Classifier Flow Table debug info */ struct mvpp2_dbgfs_flow_tbl_entry flt_entries[MVPP2_CLS_FLOWS_TBL_SIZE]; /* Entries for Classifier flows debug info */ struct mvpp2_dbgfs_flow_entry flow_entries[MVPP2_N_PRS_FLOWS]; /* Entries for per-port flows debug info */ struct mvpp2_dbgfs_port_flow_entry port_flow_entries[MVPP2_MAX_PORTS]; }; static int mvpp2_dbgfs_flow_flt_hits_show(struct seq_file *s, void *unused) { struct mvpp2_dbgfs_flow_tbl_entry *entry = s->private; u32 hits = mvpp2_cls_flow_hits(entry->priv, entry->id); seq_printf(s, "%u\n", hits); return 0; } DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_flt_hits); static int mvpp2_dbgfs_flow_dec_hits_show(struct seq_file *s, void *unused) { struct mvpp2_dbgfs_flow_entry *entry = s->private; u32 hits = mvpp2_cls_lookup_hits(entry->priv, entry->flow); seq_printf(s, "%u\n", hits); return 0; } DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_dec_hits); static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused) { struct mvpp2_dbgfs_flow_entry *entry = s->private; const struct mvpp2_cls_flow *f; const char *flow_name; f = mvpp2_cls_flow_get(entry->flow); if (!f) return -EINVAL; switch (f->flow_type) { case IPV4_FLOW: flow_name = "ipv4"; break; case IPV6_FLOW: flow_name = "ipv6"; break; case TCP_V4_FLOW: flow_name = "tcp4"; break; case TCP_V6_FLOW: flow_name = "tcp6"; break; case UDP_V4_FLOW: flow_name = "udp4"; break; case UDP_V6_FLOW: flow_name = "udp6"; break; default: flow_name = "other"; } seq_printf(s, "%s\n", flow_name); return 0; } DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_type); static int mvpp2_dbgfs_flow_id_show(struct seq_file *s, void *unused) { const struct mvpp2_dbgfs_flow_entry *entry = s->private; const struct mvpp2_cls_flow *f; f = mvpp2_cls_flow_get(entry->flow); if (!f) return -EINVAL; seq_printf(s, "%d\n", f->flow_id); return 0; } DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_id); static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused) { struct mvpp2_dbgfs_port_flow_entry *entry = s->private; struct mvpp2_port *port = entry->port; struct mvpp2_cls_flow_entry fe; const struct mvpp2_cls_flow *f; int flow_index; u16 hash_opts; f = mvpp2_cls_flow_get(entry->dbg_fe->flow); if (!f) return -EINVAL; flow_index = MVPP2_CLS_FLT_HASH_ENTRY(entry->port->id, f->flow_id); mvpp2_cls_flow_read(port->priv, flow_index, &fe); hash_opts = mvpp2_flow_get_hek_fields(&fe); seq_printf(s, "0x%04x\n", hash_opts); return 0; } DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_hash_opt); static int mvpp2_dbgfs_port_flow_engine_show(struct seq_file *s, void *unused) { struct mvpp2_dbgfs_port_flow_entry *entry = s->private; struct mvpp2_port *port = 
entry->port; struct mvpp2_cls_flow_entry fe; const struct mvpp2_cls_flow *f; int flow_index, engine; f = mvpp2_cls_flow_get(entry->dbg_fe->flow); if (!f) return -EINVAL; flow_index = MVPP2_CLS_FLT_HASH_ENTRY(entry->port->id, f->flow_id); mvpp2_cls_flow_read(port->priv, flow_index, &fe); engine = mvpp2_cls_flow_eng_get(&fe); seq_printf(s, "%d\n", engine); return 0; } DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_engine); static int mvpp2_dbgfs_flow_c2_hits_show(struct seq_file *s, void *unused) { struct mvpp2_dbgfs_c2_entry *entry = s->private; u32 hits; hits = mvpp2_cls_c2_hit_count(entry->priv, entry->id); seq_printf(s, "%u\n", hits); return 0; } DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_hits); static int mvpp2_dbgfs_flow_c2_rxq_show(struct seq_file *s, void *unused) { struct mvpp2_dbgfs_c2_entry *entry = s->private; struct mvpp2_cls_c2_entry c2; u8 qh, ql; mvpp2_cls_c2_read(entry->priv, entry->id, &c2); qh = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QHIGH_OFFS) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK; ql = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QLOW_OFFS) & MVPP22_CLS_C2_ATTR0_QLOW_MASK; seq_printf(s, "%d\n", (qh << 3 | ql)); return 0; } DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_rxq); static int mvpp2_dbgfs_flow_c2_enable_show(struct seq_file *s, void *unused) { struct mvpp2_dbgfs_c2_entry *entry = s->private; struct mvpp2_cls_c2_entry c2; int enabled; mvpp2_cls_c2_read(entry->priv, entry->id, &c2); enabled = !!(c2.attr[2] & MVPP22_CLS_C2_ATTR2_RSS_EN); seq_printf(s, "%d\n", enabled); return 0; } DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_enable); static int mvpp2_dbgfs_port_vid_show(struct seq_file *s, void *unused) { struct mvpp2_port *port = s->private; unsigned char byte[2], enable[2]; struct mvpp2 *priv = port->priv; struct mvpp2_prs_entry pe; unsigned long pmap; u16 rvid; int tid; for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { mvpp2_prs_init_from_hw(priv, &pe, tid); pmap = mvpp2_prs_tcam_port_map_get(&pe); if (!priv->prs_shadow[tid].valid) continue; if (!test_bit(port->id, &pmap)) continue; mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); rvid = ((byte[0] & 0xf) << 8) + byte[1]; seq_printf(s, "%u\n", rvid); } return 0; } DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_vid); static int mvpp2_dbgfs_port_parser_show(struct seq_file *s, void *unused) { struct mvpp2_port *port = s->private; struct mvpp2 *priv = port->priv; struct mvpp2_prs_entry pe; unsigned long pmap; int i; for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) { mvpp2_prs_init_from_hw(port->priv, &pe, i); pmap = mvpp2_prs_tcam_port_map_get(&pe); if (priv->prs_shadow[i].valid && test_bit(port->id, &pmap)) seq_printf(s, "%03d\n", i); } return 0; } DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_parser); static int mvpp2_dbgfs_filter_show(struct seq_file *s, void *unused) { struct mvpp2_port *port = s->private; struct mvpp2 *priv = port->priv; struct mvpp2_prs_entry pe; unsigned long pmap; int index, tid; for (tid = MVPP2_PE_MAC_RANGE_START; tid <= MVPP2_PE_MAC_RANGE_END; tid++) { unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; if (!priv->prs_shadow[tid].valid || priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC || priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF) continue; mvpp2_prs_init_from_hw(priv, &pe, tid); pmap = mvpp2_prs_tcam_port_map_get(&pe); /* We only want entries active on this port */ if (!test_bit(port->id, &pmap)) continue; /* Read mac addr from entry */ for (index = 0; index < ETH_ALEN; index++) mvpp2_prs_tcam_data_byte_get(&pe, 
index, &da[index], &da_mask[index]); seq_printf(s, "%pM\n", da); } return 0; } DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_filter); static int mvpp2_dbgfs_prs_lu_show(struct seq_file *s, void *unused) { struct mvpp2_dbgfs_prs_entry *entry = s->private; struct mvpp2 *priv = entry->priv; seq_printf(s, "%x\n", priv->prs_shadow[entry->tid].lu); return 0; } DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_lu); static int mvpp2_dbgfs_prs_pmap_show(struct seq_file *s, void *unused) { struct mvpp2_dbgfs_prs_entry *entry = s->private; struct mvpp2_prs_entry pe; unsigned int pmap; mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid); pmap = mvpp2_prs_tcam_port_map_get(&pe); pmap &= MVPP2_PRS_PORT_MASK; seq_printf(s, "%02x\n", pmap); return 0; } DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_pmap); static int mvpp2_dbgfs_prs_ai_show(struct seq_file *s, void *unused) { struct mvpp2_dbgfs_prs_entry *entry = s->private; struct mvpp2_prs_entry pe; unsigned char ai, ai_mask; mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid); ai = pe.tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK; ai_mask = (pe.tcam[MVPP2_PRS_TCAM_AI_WORD] >> 16) & MVPP2_PRS_AI_MASK; seq_printf(s, "%02x %02x\n", ai, ai_mask); return 0; } DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_ai); static int mvpp2_dbgfs_prs_hdata_show(struct seq_file *s, void *unused) { struct mvpp2_dbgfs_prs_entry *entry = s->private; struct mvpp2_prs_entry pe; unsigned char data[8], mask[8]; int i; mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid); for (i = 0; i < 8; i++) mvpp2_prs_tcam_data_byte_get(&pe, i, &data[i], &mask[i]); seq_printf(s, "%*phN %*phN\n", 8, data, 8, mask); return 0; } DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hdata); static int mvpp2_dbgfs_prs_sram_show(struct seq_file *s, void *unused) { struct mvpp2_dbgfs_prs_entry *entry = s->private; struct mvpp2_prs_entry pe; mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid); seq_printf(s, "%*phN\n", 14, pe.sram); return 0; } DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_sram); static int mvpp2_dbgfs_prs_hits_show(struct seq_file *s, void *unused) { struct mvpp2_dbgfs_prs_entry *entry = s->private; int val; val = mvpp2_prs_hits(entry->priv, entry->tid); if (val < 0) return val; seq_printf(s, "%d\n", val); return 0; } DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hits); static int mvpp2_dbgfs_prs_valid_show(struct seq_file *s, void *unused) { struct mvpp2_dbgfs_prs_entry *entry = s->private; struct mvpp2 *priv = entry->priv; int tid = entry->tid; seq_printf(s, "%d\n", priv->prs_shadow[tid].valid ? 
1 : 0); return 0; } DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_valid); static int mvpp2_dbgfs_flow_port_init(struct dentry *parent, struct mvpp2_port *port, struct mvpp2_dbgfs_flow_entry *entry) { struct mvpp2_dbgfs_port_flow_entry *port_entry; struct dentry *port_dir; port_dir = debugfs_create_dir(port->dev->name, parent); port_entry = &port->priv->dbgfs_entries->port_flow_entries[port->id]; port_entry->port = port; port_entry->dbg_fe = entry; debugfs_create_file("hash_opts", 0444, port_dir, port_entry, &mvpp2_dbgfs_port_flow_hash_opt_fops); debugfs_create_file("engine", 0444, port_dir, port_entry, &mvpp2_dbgfs_port_flow_engine_fops); return 0; } static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent, struct mvpp2 *priv, int flow) { struct mvpp2_dbgfs_flow_entry *entry; struct dentry *flow_entry_dir; char flow_entry_name[10]; int i, ret; sprintf(flow_entry_name, "%02d", flow); flow_entry_dir = debugfs_create_dir(flow_entry_name, parent); entry = &priv->dbgfs_entries->flow_entries[flow]; entry->flow = flow; entry->priv = priv; debugfs_create_file("dec_hits", 0444, flow_entry_dir, entry, &mvpp2_dbgfs_flow_dec_hits_fops); debugfs_create_file("type", 0444, flow_entry_dir, entry, &mvpp2_dbgfs_flow_type_fops); debugfs_create_file("id", 0444, flow_entry_dir, entry, &mvpp2_dbgfs_flow_id_fops); /* Create entry for each port */ for (i = 0; i < priv->port_count; i++) { ret = mvpp2_dbgfs_flow_port_init(flow_entry_dir, priv->port_list[i], entry); if (ret) return ret; } return 0; } static int mvpp2_dbgfs_flow_init(struct dentry *parent, struct mvpp2 *priv) { struct dentry *flow_dir; int i, ret; flow_dir = debugfs_create_dir("flows", parent); for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) { ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i); if (ret) return ret; } return 0; } static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent, struct mvpp2 *priv, int tid) { struct mvpp2_dbgfs_prs_entry *entry; struct dentry *prs_entry_dir; char prs_entry_name[10]; if (tid >= MVPP2_PRS_TCAM_SRAM_SIZE) return -EINVAL; sprintf(prs_entry_name, "%03d", tid); prs_entry_dir = debugfs_create_dir(prs_entry_name, parent); entry = &priv->dbgfs_entries->prs_entries[tid]; entry->tid = tid; entry->priv = priv; /* Create each attr */ debugfs_create_file("sram", 0444, prs_entry_dir, entry, &mvpp2_dbgfs_prs_sram_fops); debugfs_create_file("valid", 0644, prs_entry_dir, entry, &mvpp2_dbgfs_prs_valid_fops); debugfs_create_file("lookup_id", 0644, prs_entry_dir, entry, &mvpp2_dbgfs_prs_lu_fops); debugfs_create_file("ai", 0644, prs_entry_dir, entry, &mvpp2_dbgfs_prs_ai_fops); debugfs_create_file("header_data", 0644, prs_entry_dir, entry, &mvpp2_dbgfs_prs_hdata_fops); debugfs_create_file("hits", 0444, prs_entry_dir, entry, &mvpp2_dbgfs_prs_hits_fops); debugfs_create_file("pmap", 0444, prs_entry_dir, entry, &mvpp2_dbgfs_prs_pmap_fops); return 0; } static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv) { struct dentry *prs_dir; int i, ret; prs_dir = debugfs_create_dir("parser", parent); for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) { ret = mvpp2_dbgfs_prs_entry_init(prs_dir, priv, i); if (ret) return ret; } return 0; } static int mvpp2_dbgfs_c2_entry_init(struct dentry *parent, struct mvpp2 *priv, int id) { struct mvpp2_dbgfs_c2_entry *entry; struct dentry *c2_entry_dir; char c2_entry_name[10]; if (id >= MVPP22_CLS_C2_N_ENTRIES) return -EINVAL; sprintf(c2_entry_name, "%03d", id); c2_entry_dir = debugfs_create_dir(c2_entry_name, parent); entry = &priv->dbgfs_entries->c2_entries[id]; entry->id = id; entry->priv = priv; 
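/* Descriptive comment (editorial addition, not from the original source):
 * the files created below expose the C2 entry's hit counter, its default
 * rxq (combined queue-high/queue-low fields) and the RSS enable bit.
 */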
debugfs_create_file("hits", 0444, c2_entry_dir, entry, &mvpp2_dbgfs_flow_c2_hits_fops); debugfs_create_file("default_rxq", 0444, c2_entry_dir, entry, &mvpp2_dbgfs_flow_c2_rxq_fops); debugfs_create_file("rss_enable", 0444, c2_entry_dir, entry, &mvpp2_dbgfs_flow_c2_enable_fops); return 0; } static int mvpp2_dbgfs_flow_tbl_entry_init(struct dentry *parent, struct mvpp2 *priv, int id) { struct mvpp2_dbgfs_flow_tbl_entry *entry; struct dentry *flow_tbl_entry_dir; char flow_tbl_entry_name[10]; if (id >= MVPP2_CLS_FLOWS_TBL_SIZE) return -EINVAL; sprintf(flow_tbl_entry_name, "%03d", id); flow_tbl_entry_dir = debugfs_create_dir(flow_tbl_entry_name, parent); entry = &priv->dbgfs_entries->flt_entries[id]; entry->id = id; entry->priv = priv; debugfs_create_file("hits", 0444, flow_tbl_entry_dir, entry, &mvpp2_dbgfs_flow_flt_hits_fops); return 0; } static int mvpp2_dbgfs_cls_init(struct dentry *parent, struct mvpp2 *priv) { struct dentry *cls_dir, *c2_dir, *flow_tbl_dir; int i, ret; cls_dir = debugfs_create_dir("classifier", parent); c2_dir = debugfs_create_dir("c2", cls_dir); for (i = 0; i < MVPP22_CLS_C2_N_ENTRIES; i++) { ret = mvpp2_dbgfs_c2_entry_init(c2_dir, priv, i); if (ret) return ret; } flow_tbl_dir = debugfs_create_dir("flow_table", cls_dir); for (i = 0; i < MVPP2_CLS_FLOWS_TBL_SIZE; i++) { ret = mvpp2_dbgfs_flow_tbl_entry_init(flow_tbl_dir, priv, i); if (ret) return ret; } return 0; } static int mvpp2_dbgfs_port_init(struct dentry *parent, struct mvpp2_port *port) { struct dentry *port_dir; port_dir = debugfs_create_dir(port->dev->name, parent); debugfs_create_file("parser_entries", 0444, port_dir, port, &mvpp2_dbgfs_port_parser_fops); debugfs_create_file("mac_filter", 0444, port_dir, port, &mvpp2_dbgfs_filter_fops); debugfs_create_file("vid_filter", 0444, port_dir, port, &mvpp2_dbgfs_port_vid_fops); return 0; } static struct dentry *mvpp2_root; void mvpp2_dbgfs_exit(void) { debugfs_remove(mvpp2_root); } void mvpp2_dbgfs_cleanup(struct mvpp2 *priv) { debugfs_remove_recursive(priv->dbgfs_dir); kfree(priv->dbgfs_entries); } void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name) { struct dentry *mvpp2_dir; int ret, i; if (!mvpp2_root) mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL); mvpp2_dir = debugfs_create_dir(name, mvpp2_root); priv->dbgfs_dir = mvpp2_dir; priv->dbgfs_entries = kzalloc(sizeof(*priv->dbgfs_entries), GFP_KERNEL); if (!priv->dbgfs_entries) goto err; ret = mvpp2_dbgfs_prs_init(mvpp2_dir, priv); if (ret) goto err; ret = mvpp2_dbgfs_cls_init(mvpp2_dir, priv); if (ret) goto err; for (i = 0; i < priv->port_count; i++) { ret = mvpp2_dbgfs_port_init(mvpp2_dir, priv->port_list[i]); if (ret) goto err; } ret = mvpp2_dbgfs_flow_init(mvpp2_dir, priv); if (ret) goto err; return; err: mvpp2_dbgfs_cleanup(priv); }
linux-master
drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
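The mvpp2 debugfs code above follows one pattern throughout: every debugfs file is handed a small per-entry context struct (an index plus a back-pointer to the shared driver state) taken from the preallocated dbgfs_entries array, and the show callback recovers everything it needs from that single pointer passed via debugfs_create_file(). The user-space sketch below mimics only that pattern; struct fake_priv, struct fake_entry and fake_show() are made-up names for illustration and are not part of the driver.

/* Illustrative user-space sketch, not driver code: it mimics the pattern
 * above where every debugfs file gets a small per-entry context (an index
 * plus a back-pointer to the shared state) taken from a preallocated
 * array.  struct fake_priv, struct fake_entry and fake_show() are
 * made-up names for this example. */
#include <stdio.h>

#define N_ENTRIES 4

struct fake_priv {
	const char *name;               /* stands in for struct mvpp2 */
};

struct fake_entry {                     /* analogous to mvpp2_dbgfs_prs_entry */
	int tid;
	struct fake_priv *priv;
};

static struct fake_entry entries[N_ENTRIES];

/* Plays the role of a DEFINE_SHOW_ATTRIBUTE() show handler: it receives
 * only the per-entry context and derives everything else from it. */
static int fake_show(struct fake_entry *e, FILE *out)
{
	return fprintf(out, "%s/%02d\n", e->priv->name, e->tid) < 0 ? -1 : 0;
}

int main(void)
{
	struct fake_priv priv = { .name = "parser" };

	/* Mirrors mvpp2_dbgfs_prs_entry_init(): fill the preallocated array
	 * and exercise one "file" per entry. */
	for (int i = 0; i < N_ENTRIES; i++) {
		entries[i].tid = i;
		entries[i].priv = &priv;
		fake_show(&entries[i], stdout);
	}
	return 0;
}

Keeping every entry in one kzalloc'ed container, as the driver does, also makes teardown trivial: mvpp2_dbgfs_cleanup() only has to remove the directory tree and free that single allocation.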
/*====================================================================== fmvj18x_cs.c 2.8 2002/03/23 A fmvj18x (and its compatibles) PCMCIA client driver Contributed by Shingo Fujimoto, [email protected] TDK LAK-CD021 and CONTEC C-NET(PC)C support added by Nobuhiro Katayama, [email protected] The PCMCIA client code is based on code written by David Hinds. Network code is based on the "FMV-18x driver" by Yutaka TAMIYA but is actually largely Donald Becker's AT1700 driver, which carries the following attribution: Written 1993-94 by Donald Becker. Copyright 1993 United States Government as represented by the Director, National Security Agency. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. The author may be reached as [email protected], or C/O Scyld Computing Corporation 410 Severn Ave., Suite 210 Annapolis MD 21403 ======================================================================*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define DRV_NAME "fmvj18x_cs" #define DRV_VERSION "2.9" #include <linux/module.h> #include <linux/kernel.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/ioport.h> #include <linux/crc32.h> #include <pcmcia/cistpl.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> #include <linux/uaccess.h> #include <asm/io.h> /*====================================================================*/ /* Module parameters */ MODULE_DESCRIPTION("fmvj18x and compatible PCMCIA ethernet driver"); MODULE_LICENSE("GPL"); #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) /* SRAM configuration */ /* 0:4KB*2 TX buffer else:8KB*2 TX buffer */ INT_MODULE_PARM(sram_config, 0); /*====================================================================*/ /* PCMCIA event handlers */ static int fmvj18x_config(struct pcmcia_device *link); static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id); static int fmvj18x_setup_mfc(struct pcmcia_device *link); static void fmvj18x_release(struct pcmcia_device *link); static void fmvj18x_detach(struct pcmcia_device *p_dev); /* LAN controller(MBH86960A) specific routines */ static int fjn_config(struct net_device *dev, struct ifmap *map); static int fjn_open(struct net_device *dev); static int fjn_close(struct net_device *dev); static netdev_tx_t fjn_start_xmit(struct sk_buff *skb, struct net_device *dev); static irqreturn_t fjn_interrupt(int irq, void *dev_id); static void fjn_rx(struct net_device *dev); static void fjn_reset(struct net_device *dev); static void set_rx_mode(struct net_device *dev); static void fjn_tx_timeout(struct net_device *dev, unsigned int txqueue); static const struct ethtool_ops netdev_ethtool_ops; /* card type */ enum cardtype { MBH10302, MBH10304, TDK, CONTEC, LA501, UNGERMANN, XXX10304, NEC, KME }; /* driver specific data structure */ struct local_info { struct pcmcia_device *p_dev; long open_time; uint tx_started:1; uint tx_queue; u_short tx_queue_len; enum cardtype cardtype; u_short sent; u_char __iomem *base; }; #define MC_FILTERBREAK 64 /*====================================================================*/ /* ioport offset from the base address */ #define TX_STATUS 0 /* transmit status register */ #define RX_STATUS 1 /* 
receive status register */ #define TX_INTR 2 /* transmit interrupt mask register */ #define RX_INTR 3 /* receive interrupt mask register */ #define TX_MODE 4 /* transmit mode register */ #define RX_MODE 5 /* receive mode register */ #define CONFIG_0 6 /* configuration register 0 */ #define CONFIG_1 7 /* configuration register 1 */ #define NODE_ID 8 /* node ID register (bank 0) */ #define MAR_ADR 8 /* multicast address registers (bank 1) */ #define DATAPORT 8 /* buffer mem port registers (bank 2) */ #define TX_START 10 /* transmit start register */ #define COL_CTRL 11 /* 16 collision control register */ #define BMPR12 12 /* reserved */ #define BMPR13 13 /* reserved */ #define RX_SKIP 14 /* skip received packet register */ #define LAN_CTRL 16 /* LAN card control register */ #define MAC_ID 0x1a /* hardware address */ #define UNGERMANN_MAC_ID 0x18 /* UNGERMANN-BASS hardware address */ /* control bits */ #define ENA_TMT_OK 0x80 #define ENA_TMT_REC 0x20 #define ENA_COL 0x04 #define ENA_16_COL 0x02 #define ENA_TBUS_ERR 0x01 #define ENA_PKT_RDY 0x80 #define ENA_BUS_ERR 0x40 #define ENA_LEN_ERR 0x08 #define ENA_ALG_ERR 0x04 #define ENA_CRC_ERR 0x02 #define ENA_OVR_FLO 0x01 /* flags */ #define F_TMT_RDY 0x80 /* can accept new packet */ #define F_NET_BSY 0x40 /* carrier is detected */ #define F_TMT_OK 0x20 /* send packet successfully */ #define F_SRT_PKT 0x10 /* short packet error */ #define F_COL_ERR 0x04 /* collision error */ #define F_16_COL 0x02 /* 16 collision error */ #define F_TBUS_ERR 0x01 /* bus read error */ #define F_PKT_RDY 0x80 /* packet(s) in buffer */ #define F_BUS_ERR 0x40 /* bus read error */ #define F_LEN_ERR 0x08 /* short packet */ #define F_ALG_ERR 0x04 /* frame error */ #define F_CRC_ERR 0x02 /* CRC error */ #define F_OVR_FLO 0x01 /* overflow error */ #define F_BUF_EMP 0x40 /* receive buffer is empty */ #define F_SKP_PKT 0x05 /* drop packet in buffer */ /* default bitmaps */ #define D_TX_INTR ( ENA_TMT_OK ) #define D_RX_INTR ( ENA_PKT_RDY | ENA_LEN_ERR \ | ENA_ALG_ERR | ENA_CRC_ERR | ENA_OVR_FLO ) #define TX_STAT_M ( F_TMT_RDY ) #define RX_STAT_M ( F_PKT_RDY | F_LEN_ERR \ | F_ALG_ERR | F_CRC_ERR | F_OVR_FLO ) /* commands */ #define D_TX_MODE 0x06 /* no tests, detect carrier */ #define ID_MATCHED 0x02 /* (RX_MODE) */ #define RECV_ALL 0x03 /* (RX_MODE) */ #define CONFIG0_DFL 0x5a /* 16bit bus, 4K x 2 Tx queues */ #define CONFIG0_DFL_1 0x5e /* 16bit bus, 8K x 2 Tx queues */ #define CONFIG0_RST 0xda /* Data Link Controller off (CONFIG_0) */ #define CONFIG0_RST_1 0xde /* Data Link Controller off (CONFIG_0) */ #define BANK_0 0xa0 /* bank 0 (CONFIG_1) */ #define BANK_1 0xa4 /* bank 1 (CONFIG_1) */ #define BANK_2 0xa8 /* bank 2 (CONFIG_1) */ #define CHIP_OFF 0x80 /* contrl chip power off (CONFIG_1) */ #define DO_TX 0x80 /* do transmit packet */ #define SEND_PKT 0x81 /* send a packet */ #define AUTO_MODE 0x07 /* Auto skip packet on 16 col detected */ #define MANU_MODE 0x03 /* Stop and skip packet on 16 col */ #define TDK_AUTO_MODE 0x47 /* Auto skip packet on 16 col detected */ #define TDK_MANU_MODE 0x43 /* Stop and skip packet on 16 col */ #define INTR_OFF 0x0d /* LAN controller ignores interrupts */ #define INTR_ON 0x1d /* LAN controller will catch interrupts */ #define TX_TIMEOUT ((400*HZ)/1000) #define BANK_0U 0x20 /* bank 0 (CONFIG_1) */ #define BANK_1U 0x24 /* bank 1 (CONFIG_1) */ #define BANK_2U 0x28 /* bank 2 (CONFIG_1) */ static const struct net_device_ops fjn_netdev_ops = { .ndo_open = fjn_open, .ndo_stop = fjn_close, .ndo_start_xmit = fjn_start_xmit, .ndo_tx_timeout = 
fjn_tx_timeout, .ndo_set_config = fjn_config, .ndo_set_rx_mode = set_rx_mode, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int fmvj18x_probe(struct pcmcia_device *link) { struct local_info *lp; struct net_device *dev; dev_dbg(&link->dev, "fmvj18x_attach()\n"); /* Make up a FMVJ18x specific data structure */ dev = alloc_etherdev(sizeof(struct local_info)); if (!dev) return -ENOMEM; lp = netdev_priv(dev); link->priv = dev; lp->p_dev = link; lp->base = NULL; /* The io structure describes IO port mapping */ link->resource[0]->end = 32; link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; /* General socket configuration */ link->config_flags |= CONF_ENABLE_IRQ; dev->netdev_ops = &fjn_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; dev->ethtool_ops = &netdev_ethtool_ops; return fmvj18x_config(link); } /* fmvj18x_attach */ /*====================================================================*/ static void fmvj18x_detach(struct pcmcia_device *link) { struct net_device *dev = link->priv; dev_dbg(&link->dev, "fmvj18x_detach\n"); unregister_netdev(dev); fmvj18x_release(link); free_netdev(dev); } /* fmvj18x_detach */ /*====================================================================*/ static int mfc_try_io_port(struct pcmcia_device *link) { int i, ret; static const unsigned int serial_base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; for (i = 0; i < 5; i++) { link->resource[1]->start = serial_base[i]; link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; if (link->resource[1]->start == 0) { link->resource[1]->end = 0; pr_notice("out of resource for serial\n"); } ret = pcmcia_request_io(link); if (ret == 0) return ret; } return ret; } static int ungermann_try_io_port(struct pcmcia_device *link) { int ret; unsigned int ioaddr; /* Ungermann-Bass Access/CARD accepts 0x300,0x320,0x340,0x360 0x380,0x3c0 only for ioport. */ for (ioaddr = 0x300; ioaddr < 0x3e0; ioaddr += 0x20) { link->resource[0]->start = ioaddr; ret = pcmcia_request_io(link); if (ret == 0) { /* calculate ConfigIndex value */ link->config_index = ((link->resource[0]->start & 0x0f0) >> 3) | 0x22; return ret; } } return ret; /* RequestIO failed */ } static int fmvj18x_ioprobe(struct pcmcia_device *p_dev, void *priv_data) { return 0; /* strange, but that's what the code did already before... */ } static int fmvj18x_config(struct pcmcia_device *link) { struct net_device *dev = link->priv; struct local_info *lp = netdev_priv(dev); int i, ret; unsigned int ioaddr; enum cardtype cardtype; char *card_name = "unknown"; u8 *buf; size_t len; u_char buggybuf[32]; u8 addr[ETH_ALEN]; dev_dbg(&link->dev, "fmvj18x_config\n"); link->io_lines = 5; len = pcmcia_get_tuple(link, CISTPL_FUNCE, &buf); kfree(buf); if (len) { /* Yes, I have CISTPL_FUNCE. 
Let's check CISTPL_MANFID */ ret = pcmcia_loop_config(link, fmvj18x_ioprobe, NULL); if (ret != 0) goto failed; switch (link->manf_id) { case MANFID_TDK: cardtype = TDK; if (link->card_id == PRODID_TDK_GN3410 || link->card_id == PRODID_TDK_NP9610 || link->card_id == PRODID_TDK_MN3200) { /* MultiFunction Card */ link->config_base = 0x800; link->config_index = 0x47; link->resource[1]->end = 8; } break; case MANFID_NEC: cardtype = NEC; /* MultiFunction Card */ link->config_base = 0x800; link->config_index = 0x47; link->resource[1]->end = 8; break; case MANFID_KME: cardtype = KME; /* MultiFunction Card */ link->config_base = 0x800; link->config_index = 0x47; link->resource[1]->end = 8; break; case MANFID_CONTEC: cardtype = CONTEC; break; case MANFID_FUJITSU: if (link->config_base == 0x0fe0) cardtype = MBH10302; else if (link->card_id == PRODID_FUJITSU_MBH10302) /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302), but these are MBH10304 based card. */ cardtype = MBH10304; else if (link->card_id == PRODID_FUJITSU_MBH10304) cardtype = MBH10304; else cardtype = LA501; break; default: cardtype = MBH10304; } } else { /* old type card */ switch (link->manf_id) { case MANFID_FUJITSU: if (link->card_id == PRODID_FUJITSU_MBH10304) { cardtype = XXX10304; /* MBH10304 with buggy CIS */ link->config_index = 0x20; } else { cardtype = MBH10302; /* NextCom NC5310, etc. */ link->config_index = 1; } break; case MANFID_UNGERMANN: cardtype = UNGERMANN; break; default: cardtype = MBH10302; link->config_index = 1; } } if (link->resource[1]->end != 0) { ret = mfc_try_io_port(link); if (ret != 0) goto failed; } else if (cardtype == UNGERMANN) { ret = ungermann_try_io_port(link); if (ret != 0) goto failed; } else { ret = pcmcia_request_io(link); if (ret) goto failed; } ret = pcmcia_request_irq(link, fjn_interrupt); if (ret) goto failed; ret = pcmcia_enable_device(link); if (ret) goto failed; dev->irq = link->irq; dev->base_addr = link->resource[0]->start; if (resource_size(link->resource[1]) != 0) { ret = fmvj18x_setup_mfc(link); if (ret != 0) goto failed; } ioaddr = dev->base_addr; /* Reset controller */ if (sram_config == 0) outb(CONFIG0_RST, ioaddr + CONFIG_0); else outb(CONFIG0_RST_1, ioaddr + CONFIG_0); /* Power On chip and select bank 0 */ if (cardtype == MBH10302) outb(BANK_0, ioaddr + CONFIG_1); else outb(BANK_0U, ioaddr + CONFIG_1); /* Set hardware address */ switch (cardtype) { case MBH10304: case TDK: case LA501: case CONTEC: case NEC: case KME: if (cardtype == MBH10304) { card_name = "FMV-J182"; len = pcmcia_get_tuple(link, CISTPL_FUNCE, &buf); if (len < 11) { kfree(buf); goto failed; } /* Read MACID from CIS */ eth_hw_addr_set(dev, &buf[5]); kfree(buf); } else { if (pcmcia_get_mac_from_cis(link, dev)) goto failed; if( cardtype == TDK ) { card_name = "TDK LAK-CD021"; } else if( cardtype == LA501 ) { card_name = "LA501"; } else if( cardtype == NEC ) { card_name = "PK-UG-J001"; } else if( cardtype == KME ) { card_name = "Panasonic"; } else { card_name = "C-NET(PC)C"; } } break; case UNGERMANN: /* Read MACID from register */ for (i = 0; i < 6; i++) addr[i] = inb(ioaddr + UNGERMANN_MAC_ID + i); eth_hw_addr_set(dev, addr); card_name = "Access/CARD"; break; case XXX10304: /* Read MACID from Buggy CIS */ if (fmvj18x_get_hwinfo(link, buggybuf) == -1) { pr_notice("unable to read hardware net address\n"); goto failed; } eth_hw_addr_set(dev, buggybuf); card_name = "FMV-J182"; break; case MBH10302: default: /* Read MACID from register */ for (i = 0; i < 6; i++) addr[i] = inb(ioaddr + MAC_ID + i); 
eth_hw_addr_set(dev, addr); card_name = "FMV-J181"; break; } lp->cardtype = cardtype; SET_NETDEV_DEV(dev, &link->dev); if (register_netdev(dev) != 0) { pr_notice("register_netdev() failed\n"); goto failed; } /* print current configuration */ netdev_info(dev, "%s, sram %s, port %#3lx, irq %d, hw_addr %pM\n", card_name, sram_config == 0 ? "4K TX*2" : "8K TX*2", dev->base_addr, dev->irq, dev->dev_addr); return 0; failed: fmvj18x_release(link); return -ENODEV; } /* fmvj18x_config */ /*====================================================================*/ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id) { u_char __iomem *base; int i, j; /* Allocate a small memory window */ link->resource[2]->flags |= WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; link->resource[2]->start = 0; link->resource[2]->end = 0; i = pcmcia_request_window(link, link->resource[2], 0); if (i != 0) return -1; base = ioremap(link->resource[2]->start, resource_size(link->resource[2])); if (!base) { pcmcia_release_window(link, link->resource[2]); return -1; } pcmcia_map_mem_page(link, link->resource[2], 0); /* * MBH10304 CISTPL_FUNCE_LAN_NODE_ID format * 22 0d xx xx xx 04 06 yy yy yy yy yy yy ff * 'xx' is garbage. * 'yy' is MAC address. */ for (i = 0; i < 0x200; i++) { if (readb(base+i*2) == 0x22) { if (readb(base+(i-1)*2) == 0xff && readb(base+(i+5)*2) == 0x04 && readb(base+(i+6)*2) == 0x06 && readb(base+(i+13)*2) == 0xff) break; } } if (i != 0x200) { for (j = 0 ; j < 6; j++,i++) { node_id[j] = readb(base+(i+7)*2); } } iounmap(base); j = pcmcia_release_window(link, link->resource[2]); return (i != 0x200) ? 0 : -1; } /* fmvj18x_get_hwinfo */ /*====================================================================*/ static int fmvj18x_setup_mfc(struct pcmcia_device *link) { int i; struct net_device *dev = link->priv; unsigned int ioaddr; struct local_info *lp = netdev_priv(dev); /* Allocate a small memory window */ link->resource[3]->flags = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; link->resource[3]->start = link->resource[3]->end = 0; i = pcmcia_request_window(link, link->resource[3], 0); if (i != 0) return -1; lp->base = ioremap(link->resource[3]->start, resource_size(link->resource[3])); if (lp->base == NULL) { netdev_notice(dev, "ioremap failed\n"); return -1; } i = pcmcia_map_mem_page(link, link->resource[3], 0); if (i != 0) { iounmap(lp->base); lp->base = NULL; return -1; } ioaddr = dev->base_addr; writeb(0x47, lp->base+0x800); /* Config Option Register of LAN */ writeb(0x0, lp->base+0x802); /* Config and Status Register */ writeb(ioaddr & 0xff, lp->base+0x80a); /* I/O Base(Low) of LAN */ writeb((ioaddr >> 8) & 0xff, lp->base+0x80c); /* I/O Base(High) of LAN */ writeb(0x45, lp->base+0x820); /* Config Option Register of Modem */ writeb(0x8, lp->base+0x822); /* Config and Status Register */ return 0; } /*====================================================================*/ static void fmvj18x_release(struct pcmcia_device *link) { struct net_device *dev = link->priv; struct local_info *lp = netdev_priv(dev); u_char __iomem *tmp; dev_dbg(&link->dev, "fmvj18x_release\n"); if (lp->base != NULL) { tmp = lp->base; lp->base = NULL; /* set NULL before iounmap */ iounmap(tmp); } pcmcia_disable_device(link); } static int fmvj18x_suspend(struct pcmcia_device *link) { struct net_device *dev = link->priv; if (link->open) netif_device_detach(dev); return 0; } static int fmvj18x_resume(struct pcmcia_device *link) { struct net_device *dev = link->priv; if (link->open) { fjn_reset(dev); 
netif_device_attach(dev); } return 0; } /*====================================================================*/ static const struct pcmcia_device_id fmvj18x_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x0004, 0x0004), PCMCIA_DEVICE_PROD_ID12("EAGLE Technology", "NE200 ETHERNET LAN MBH10302 04", 0x528c88c4, 0x74f91e59), PCMCIA_DEVICE_PROD_ID12("Eiger Labs,Inc", "EPX-10BT PC Card Ethernet 10BT", 0x53af556e, 0x877f9922), PCMCIA_DEVICE_PROD_ID12("Eiger labs,Inc.", "EPX-10BT PC Card Ethernet 10BT", 0xf47e6c66, 0x877f9922), PCMCIA_DEVICE_PROD_ID12("FUJITSU", "LAN Card(FMV-J182)", 0x6ee5a3d8, 0x5baf31db), PCMCIA_DEVICE_PROD_ID12("FUJITSU", "MBH10308", 0x6ee5a3d8, 0x3f04875e), PCMCIA_DEVICE_PROD_ID12("FUJITSU TOWA", "LA501", 0xb8451188, 0x12939ba2), PCMCIA_DEVICE_PROD_ID12("HITACHI", "HT-4840-11", 0xf4f43949, 0x773910f4), PCMCIA_DEVICE_PROD_ID12("NextComK.K.", "NC5310B Ver1.0 ", 0x8cef4d3a, 0x075fc7b6), PCMCIA_DEVICE_PROD_ID12("NextComK.K.", "NC5310 Ver1.0 ", 0x8cef4d3a, 0xbccf43e6), PCMCIA_DEVICE_PROD_ID12("RATOC System Inc.", "10BASE_T CARD R280", 0x85c10e17, 0xd9413666), PCMCIA_DEVICE_PROD_ID12("TDK", "LAC-CD02x", 0x1eae9475, 0x8fa0ee70), PCMCIA_DEVICE_PROD_ID12("TDK", "LAC-CF010", 0x1eae9475, 0x7683bc9a), PCMCIA_DEVICE_PROD_ID1("CONTEC Co.,Ltd.", 0x58d8fee2), PCMCIA_DEVICE_PROD_ID1("PCMCIA LAN MBH10304 ES", 0x2599f454), PCMCIA_DEVICE_PROD_ID1("PCMCIA MBH10302", 0x8f4005da), PCMCIA_DEVICE_PROD_ID1("UBKK,V2.0", 0x90888080), PCMCIA_PFC_DEVICE_PROD_ID12(0, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed), PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, fmvj18x_ids); static struct pcmcia_driver fmvj18x_cs_driver = { .owner = THIS_MODULE, .name = "fmvj18x_cs", .probe = fmvj18x_probe, .remove = fmvj18x_detach, .id_table = fmvj18x_ids, .suspend = fmvj18x_suspend, .resume = fmvj18x_resume, }; module_pcmcia_driver(fmvj18x_cs_driver); /*====================================================================*/ static irqreturn_t fjn_interrupt(int dummy, void *dev_id) { struct net_device *dev = dev_id; struct local_info *lp = netdev_priv(dev); unsigned int ioaddr; unsigned short tx_stat, rx_stat; ioaddr = dev->base_addr; /* avoid multiple interrupts */ outw(0x0000, ioaddr + TX_INTR); /* wait for a while */ udelay(1); /* get status */ tx_stat = inb(ioaddr + TX_STATUS); rx_stat = inb(ioaddr + RX_STATUS); /* clear status */ outb(tx_stat, ioaddr + TX_STATUS); outb(rx_stat, ioaddr + RX_STATUS); pr_debug("%s: interrupt, rx_status %02x.\n", dev->name, rx_stat); pr_debug(" tx_status %02x.\n", tx_stat); if (rx_stat || (inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) { /* there is packet(s) in rx buffer */ fjn_rx(dev); } if (tx_stat & F_TMT_RDY) { dev->stats.tx_packets += lp->sent ; lp->sent = 0 ; if (lp->tx_queue) { outb(DO_TX | lp->tx_queue, ioaddr + TX_START); lp->sent = lp->tx_queue ; lp->tx_queue = 0; lp->tx_queue_len = 0; netif_trans_update(dev); } else { lp->tx_started = 0; } netif_wake_queue(dev); } pr_debug("%s: exiting interrupt,\n", dev->name); pr_debug(" tx_status %02x, rx_status %02x.\n", tx_stat, rx_stat); outb(D_TX_INTR, ioaddr + TX_INTR); outb(D_RX_INTR, ioaddr + RX_INTR); if (lp->base != NULL) { /* Ack interrupt for multifunction card */ 
writeb(0x01, lp->base+0x802); writeb(0x09, lp->base+0x822); } return IRQ_HANDLED; } /* fjn_interrupt */ /*====================================================================*/ static void fjn_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct local_info *lp = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; netdev_notice(dev, "transmit timed out with status %04x, %s?\n", htons(inw(ioaddr + TX_STATUS)), inb(ioaddr + TX_STATUS) & F_TMT_RDY ? "IRQ conflict" : "network cable problem"); netdev_notice(dev, "timeout registers: %04x %04x %04x " "%04x %04x %04x %04x %04x.\n", htons(inw(ioaddr + 0)), htons(inw(ioaddr + 2)), htons(inw(ioaddr + 4)), htons(inw(ioaddr + 6)), htons(inw(ioaddr + 8)), htons(inw(ioaddr + 10)), htons(inw(ioaddr + 12)), htons(inw(ioaddr + 14))); dev->stats.tx_errors++; /* ToDo: We should try to restart the adaptor... */ local_irq_disable(); fjn_reset(dev); lp->tx_started = 0; lp->tx_queue = 0; lp->tx_queue_len = 0; lp->sent = 0; lp->open_time = jiffies; local_irq_enable(); netif_wake_queue(dev); } static netdev_tx_t fjn_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct local_info *lp = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; short length = skb->len; if (length < ETH_ZLEN) { if (skb_padto(skb, ETH_ZLEN)) return NETDEV_TX_OK; length = ETH_ZLEN; } netif_stop_queue(dev); { unsigned char *buf = skb->data; if (length > ETH_FRAME_LEN) { netdev_notice(dev, "Attempting to send a large packet (%d bytes)\n", length); return NETDEV_TX_BUSY; } netdev_dbg(dev, "Transmitting a packet of length %lu\n", (unsigned long)skb->len); dev->stats.tx_bytes += skb->len; /* Disable both interrupts. */ outw(0x0000, ioaddr + TX_INTR); /* wait for a while */ udelay(1); outw(length, ioaddr + DATAPORT); outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1); lp->tx_queue++; lp->tx_queue_len += ((length+3) & ~1); if (lp->tx_started == 0) { /* If the Tx is idle, always trigger a transmit. */ outb(DO_TX | lp->tx_queue, ioaddr + TX_START); lp->sent = lp->tx_queue ; lp->tx_queue = 0; lp->tx_queue_len = 0; lp->tx_started = 1; netif_start_queue(dev); } else { if( sram_config == 0 ) { if (lp->tx_queue_len < (4096 - (ETH_FRAME_LEN +2)) ) /* Yes, there is room for one more packet. */ netif_start_queue(dev); } else { if (lp->tx_queue_len < (8192 - (ETH_FRAME_LEN +2)) && lp->tx_queue < 127 ) /* Yes, there is room for one more packet. 
*/ netif_start_queue(dev); } } /* Re-enable interrupts */ outb(D_TX_INTR, ioaddr + TX_INTR); outb(D_RX_INTR, ioaddr + RX_INTR); } dev_kfree_skb (skb); return NETDEV_TX_OK; } /* fjn_start_xmit */ /*====================================================================*/ static void fjn_reset(struct net_device *dev) { struct local_info *lp = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; int i; netdev_dbg(dev, "fjn_reset() called\n"); /* Reset controller */ if( sram_config == 0 ) outb(CONFIG0_RST, ioaddr + CONFIG_0); else outb(CONFIG0_RST_1, ioaddr + CONFIG_0); /* Power On chip and select bank 0 */ if (lp->cardtype == MBH10302) outb(BANK_0, ioaddr + CONFIG_1); else outb(BANK_0U, ioaddr + CONFIG_1); /* Set Tx modes */ outb(D_TX_MODE, ioaddr + TX_MODE); /* set Rx modes */ outb(ID_MATCHED, ioaddr + RX_MODE); /* Set hardware address */ for (i = 0; i < 6; i++) outb(dev->dev_addr[i], ioaddr + NODE_ID + i); /* (re)initialize the multicast table */ set_rx_mode(dev); /* Switch to bank 2 (runtime mode) */ if (lp->cardtype == MBH10302) outb(BANK_2, ioaddr + CONFIG_1); else outb(BANK_2U, ioaddr + CONFIG_1); /* set 16col ctrl bits */ if( lp->cardtype == TDK || lp->cardtype == CONTEC) outb(TDK_AUTO_MODE, ioaddr + COL_CTRL); else outb(AUTO_MODE, ioaddr + COL_CTRL); /* clear Reserved Regs */ outb(0x00, ioaddr + BMPR12); outb(0x00, ioaddr + BMPR13); /* reset Skip packet reg. */ outb(0x01, ioaddr + RX_SKIP); /* Enable Tx and Rx */ if( sram_config == 0 ) outb(CONFIG0_DFL, ioaddr + CONFIG_0); else outb(CONFIG0_DFL_1, ioaddr + CONFIG_0); /* Init receive pointer ? */ inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT); /* Clear all status */ outb(0xff, ioaddr + TX_STATUS); outb(0xff, ioaddr + RX_STATUS); if (lp->cardtype == MBH10302) outb(INTR_OFF, ioaddr + LAN_CTRL); /* Turn on Rx interrupts */ outb(D_TX_INTR, ioaddr + TX_INTR); outb(D_RX_INTR, ioaddr + RX_INTR); /* Turn on interrupts from LAN card controller */ if (lp->cardtype == MBH10302) outb(INTR_ON, ioaddr + LAN_CTRL); } /* fjn_reset */ /*====================================================================*/ static void fjn_rx(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; int boguscount = 10; /* 5 -> 10: by agy 19940922 */ pr_debug("%s: in rx_packet(), rx_status %02x.\n", dev->name, inb(ioaddr + RX_STATUS)); while ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) { u_short status = inw(ioaddr + DATAPORT); netdev_dbg(dev, "Rxing packet mode %02x status %04x.\n", inb(ioaddr + RX_MODE), status); #ifndef final_version if (status == 0) { outb(F_SKP_PKT, ioaddr + RX_SKIP); break; } #endif if ((status & 0xF0) != 0x20) { /* There was an error. */ dev->stats.rx_errors++; if (status & F_LEN_ERR) dev->stats.rx_length_errors++; if (status & F_ALG_ERR) dev->stats.rx_frame_errors++; if (status & F_CRC_ERR) dev->stats.rx_crc_errors++; if (status & F_OVR_FLO) dev->stats.rx_over_errors++; } else { u_short pkt_len = inw(ioaddr + DATAPORT); /* Malloc up new buffer. 
*/ struct sk_buff *skb; if (pkt_len > 1550) { netdev_notice(dev, "The FMV-18x claimed a very large packet, size %d\n", pkt_len); outb(F_SKP_PKT, ioaddr + RX_SKIP); dev->stats.rx_errors++; break; } skb = netdev_alloc_skb(dev, pkt_len + 2); if (skb == NULL) { outb(F_SKP_PKT, ioaddr + RX_SKIP); dev->stats.rx_dropped++; break; } skb_reserve(skb, 2); insw(ioaddr + DATAPORT, skb_put(skb, pkt_len), (pkt_len + 1) >> 1); skb->protocol = eth_type_trans(skb, dev); { int i; pr_debug("%s: Rxed packet of length %d: ", dev->name, pkt_len); for (i = 0; i < 14; i++) pr_debug(" %02x", skb->data[i]); pr_debug(".\n"); } netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; } if (--boguscount <= 0) break; } /* If any worth-while packets have been received, dev_rint() has done a netif_wake_queue() for us and will work on them when we get to the bottom-half routine. */ /* if (lp->cardtype != TDK) { int i; for (i = 0; i < 20; i++) { if ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == F_BUF_EMP) break; (void)inw(ioaddr + DATAPORT); /+ dummy status read +/ outb(F_SKP_PKT, ioaddr + RX_SKIP); } if (i > 0) pr_debug("%s: Exint Rx packet with mode %02x after " "%d ticks.\n", dev->name, inb(ioaddr + RX_MODE), i); } */ } /* fjn_rx */ /*====================================================================*/ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strscpy(info->driver, DRV_NAME, sizeof(info->driver)); strscpy(info->version, DRV_VERSION, sizeof(info->version)); snprintf(info->bus_info, sizeof(info->bus_info), "PCMCIA 0x%lx", dev->base_addr); } static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, }; static int fjn_config(struct net_device *dev, struct ifmap *map){ return 0; } static int fjn_open(struct net_device *dev) { struct local_info *lp = netdev_priv(dev); struct pcmcia_device *link = lp->p_dev; pr_debug("fjn_open('%s').\n", dev->name); if (!pcmcia_dev_present(link)) return -ENODEV; link->open++; fjn_reset(dev); lp->tx_started = 0; lp->tx_queue = 0; lp->tx_queue_len = 0; lp->open_time = jiffies; netif_start_queue(dev); return 0; } /* fjn_open */ /*====================================================================*/ static int fjn_close(struct net_device *dev) { struct local_info *lp = netdev_priv(dev); struct pcmcia_device *link = lp->p_dev; unsigned int ioaddr = dev->base_addr; pr_debug("fjn_close('%s').\n", dev->name); lp->open_time = 0; netif_stop_queue(dev); /* Set configuration register 0 to disable Tx and Rx. */ if( sram_config == 0 ) outb(CONFIG0_RST ,ioaddr + CONFIG_0); else outb(CONFIG0_RST_1 ,ioaddr + CONFIG_0); /* Update the statistics -- ToDo. */ /* Power-down the chip. Green, green, green! */ outb(CHIP_OFF ,ioaddr + CONFIG_1); /* Set the ethernet adaptor disable IRQ */ if (lp->cardtype == MBH10302) outb(INTR_OFF, ioaddr + LAN_CTRL); link->open--; return 0; } /* fjn_close */ /*====================================================================*/ /* Set the multicast/promiscuous mode for this adaptor. 
*/ static void set_rx_mode(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; u_char mc_filter[8]; /* Multicast hash filter */ u_long flags; int i; int saved_bank; int saved_config_0 = inb(ioaddr + CONFIG_0); local_irq_save(flags); /* Disable Tx and Rx */ if (sram_config == 0) outb(CONFIG0_RST, ioaddr + CONFIG_0); else outb(CONFIG0_RST_1, ioaddr + CONFIG_0); if (dev->flags & IFF_PROMISC) { memset(mc_filter, 0xff, sizeof(mc_filter)); outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */ } else if (netdev_mc_count(dev) > MC_FILTERBREAK || (dev->flags & IFF_ALLMULTI)) { /* Too many to filter perfectly -- accept all multicasts. */ memset(mc_filter, 0xff, sizeof(mc_filter)); outb(2, ioaddr + RX_MODE); /* Use normal mode. */ } else if (netdev_mc_empty(dev)) { memset(mc_filter, 0x00, sizeof(mc_filter)); outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */ } else { struct netdev_hw_addr *ha; memset(mc_filter, 0, sizeof(mc_filter)); netdev_for_each_mc_addr(ha, dev) { unsigned int bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26; mc_filter[bit >> 3] |= (1 << (bit & 7)); } outb(2, ioaddr + RX_MODE); /* Use normal mode. */ } /* Switch to bank 1 and set the multicast table. */ saved_bank = inb(ioaddr + CONFIG_1); outb(0xe4, ioaddr + CONFIG_1); for (i = 0; i < 8; i++) outb(mc_filter[i], ioaddr + MAR_ADR + i); outb(saved_bank, ioaddr + CONFIG_1); outb(saved_config_0, ioaddr + CONFIG_0); local_irq_restore(flags); }
linux-master
drivers/net/ethernet/fujitsu/fmvj18x_cs.c
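set_rx_mode() above builds the 64-bit multicast hash filter by taking the top six bits of a little-endian CRC-32 of each multicast address and setting the matching bit in an 8-byte bitmap that is then written to the MAR_ADR registers; six bits are used because they index exactly 64 filter positions. The stand-alone sketch below reproduces that computation in user space, assuming ether_crc_le() is the usual reflected CRC-32 (polynomial 0xedb88320) seeded with ~0 and without a final inversion; hash_bit() and the sample address are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reflected CRC-32, seed ~0, no final XOR -- the assumption made here
 * about what the kernel's ether_crc_le() computes. */
static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}
	return crc;
}

/* Same selection as the driver: crc >> 26 keeps bits 31..26, giving a
 * value in 0..63 that indexes the 64-bit hardware filter. */
static unsigned int hash_bit(const uint8_t addr[6])
{
	return crc32_le(~0u, addr, 6) >> 26;
}

int main(void)
{
	uint8_t mc_filter[8];           /* mirrors mc_filter[] in set_rx_mode() */
	const uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned int bit = hash_bit(addr);

	memset(mc_filter, 0, sizeof(mc_filter));
	mc_filter[bit >> 3] |= 1u << (bit & 7);   /* the driver's exact update */

	printf("bit %u -> byte %u, mask 0x%02x\n", bit, bit >> 3, 1u << (bit & 7));
	return 0;
}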
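fjn_start_xmit() above also keeps its own model of the transmit SRAM: each queued frame costs a 16-bit length word plus the payload padded to a word boundary, which is why tx_queue_len grows by (length + 3) & ~1, and queueing continues only while there is still room for one more maximum-size frame in the 4 KB (or 8 KB) bank. The sketch below models just that byte accounting under the sram_config == 0 case; the frame sizes are illustrative and the stop condition is a simplified rendering of the driver's check.

#include <stdio.h>

#define BANK_SIZE     4096          /* 4 KB TX bank (sram_config == 0) */
#define ETH_FRAME_LEN 1514

/* Bytes one queued frame occupies in the TX SRAM: a 16-bit length word
 * followed by the payload padded to 16 bits -- the same
 * (length + 3) & ~1 the driver adds to tx_queue_len. */
static unsigned int sram_cost(unsigned int length)
{
	return (length + 3) & ~1u;
}

int main(void)
{
	unsigned int frame_lens[] = { 60, 1514, 333, 1514, 1000 };
	unsigned int tx_queue_len = 0, queued = 0;

	for (unsigned int i = 0; i < sizeof(frame_lens) / sizeof(frame_lens[0]); i++) {
		/* Driver rule, simplified: keep queueing while the bank can
		 * still take one more maximum-size frame plus its length word. */
		if (tx_queue_len >= BANK_SIZE - (ETH_FRAME_LEN + 2)) {
			printf("bank full after %u frames (%u bytes)\n",
			       queued, tx_queue_len);
			break;
		}
		tx_queue_len += sram_cost(frame_lens[i]);
		queued++;
	}
	printf("queued %u frames, %u bytes of SRAM\n", queued, tx_queue_len);
	return 0;
}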
/* * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. * Copyright (c) 2017, I2SE GmbH * * Permission to use, copy, modify, and/or distribute this software * for any purpose with or without fee is hereby granted, provided * that the above copyright notice and this permission notice appear * in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This module implements the Qualcomm Atheros UART protocol for * kernel-based UART device; it is essentially an Ethernet-to-UART * serial converter; */ #include <linux/device.h> #include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_net.h> #include <linux/sched.h> #include <linux/serdev.h> #include <linux/skbuff.h> #include <linux/types.h> #include "qca_7k_common.h" #define QCAUART_DRV_VERSION "0.1.0" #define QCAUART_DRV_NAME "qcauart" #define QCAUART_TX_TIMEOUT (1 * HZ) struct qcauart { struct net_device *net_dev; spinlock_t lock; /* transmit lock */ struct work_struct tx_work; /* Flushes transmit buffer */ struct serdev_device *serdev; struct qcafrm_handle frm_handle; struct sk_buff *rx_skb; unsigned char *tx_head; /* pointer to next XMIT byte */ int tx_left; /* bytes left in XMIT queue */ unsigned char *tx_buffer; }; static int qca_tty_receive(struct serdev_device *serdev, const unsigned char *data, size_t count) { struct qcauart *qca = serdev_device_get_drvdata(serdev); struct net_device *netdev = qca->net_dev; struct net_device_stats *n_stats = &netdev->stats; size_t i; if (!qca->rx_skb) { qca->rx_skb = netdev_alloc_skb_ip_align(netdev, netdev->mtu + VLAN_ETH_HLEN); if (!qca->rx_skb) { n_stats->rx_errors++; n_stats->rx_dropped++; return 0; } } for (i = 0; i < count; i++) { s32 retcode; retcode = qcafrm_fsm_decode(&qca->frm_handle, qca->rx_skb->data, skb_tailroom(qca->rx_skb), data[i]); switch (retcode) { case QCAFRM_GATHER: case QCAFRM_NOHEAD: break; case QCAFRM_NOTAIL: netdev_dbg(netdev, "recv: no RX tail\n"); n_stats->rx_errors++; n_stats->rx_dropped++; break; case QCAFRM_INVLEN: netdev_dbg(netdev, "recv: invalid RX length\n"); n_stats->rx_errors++; n_stats->rx_dropped++; break; default: n_stats->rx_packets++; n_stats->rx_bytes += retcode; skb_put(qca->rx_skb, retcode); qca->rx_skb->protocol = eth_type_trans( qca->rx_skb, qca->rx_skb->dev); skb_checksum_none_assert(qca->rx_skb); netif_rx(qca->rx_skb); qca->rx_skb = netdev_alloc_skb_ip_align(netdev, netdev->mtu + VLAN_ETH_HLEN); if (!qca->rx_skb) { netdev_dbg(netdev, "recv: out of RX resources\n"); n_stats->rx_errors++; return i; } } } return i; } /* Write out any remaining transmit buffer. Scheduled when tty is writable */ static void qcauart_transmit(struct work_struct *work) { struct qcauart *qca = container_of(work, struct qcauart, tx_work); struct net_device_stats *n_stats = &qca->net_dev->stats; int written; spin_lock_bh(&qca->lock); /* First make sure we're connected. 
*/ if (!netif_running(qca->net_dev)) { spin_unlock_bh(&qca->lock); return; } if (qca->tx_left <= 0) { /* Now serial buffer is almost free & we can start * transmission of another packet */ n_stats->tx_packets++; spin_unlock_bh(&qca->lock); netif_wake_queue(qca->net_dev); return; } written = serdev_device_write_buf(qca->serdev, qca->tx_head, qca->tx_left); if (written > 0) { qca->tx_left -= written; qca->tx_head += written; } spin_unlock_bh(&qca->lock); } /* Called by the driver when there's room for more data. * Schedule the transmit. */ static void qca_tty_wakeup(struct serdev_device *serdev) { struct qcauart *qca = serdev_device_get_drvdata(serdev); schedule_work(&qca->tx_work); } static const struct serdev_device_ops qca_serdev_ops = { .receive_buf = qca_tty_receive, .write_wakeup = qca_tty_wakeup, }; static int qcauart_netdev_open(struct net_device *dev) { struct qcauart *qca = netdev_priv(dev); netif_start_queue(qca->net_dev); return 0; } static int qcauart_netdev_close(struct net_device *dev) { struct qcauart *qca = netdev_priv(dev); netif_stop_queue(dev); flush_work(&qca->tx_work); spin_lock_bh(&qca->lock); qca->tx_left = 0; spin_unlock_bh(&qca->lock); return 0; } static netdev_tx_t qcauart_netdev_xmit(struct sk_buff *skb, struct net_device *dev) { struct net_device_stats *n_stats = &dev->stats; struct qcauart *qca = netdev_priv(dev); u8 pad_len = 0; int written; u8 *pos; spin_lock(&qca->lock); WARN_ON(qca->tx_left); if (!netif_running(dev)) { spin_unlock(&qca->lock); netdev_warn(qca->net_dev, "xmit: iface is down\n"); goto out; } pos = qca->tx_buffer; if (skb->len < QCAFRM_MIN_LEN) pad_len = QCAFRM_MIN_LEN - skb->len; pos += qcafrm_create_header(pos, skb->len + pad_len); memcpy(pos, skb->data, skb->len); pos += skb->len; if (pad_len) { memset(pos, 0, pad_len); pos += pad_len; } pos += qcafrm_create_footer(pos); netif_stop_queue(qca->net_dev); written = serdev_device_write_buf(qca->serdev, qca->tx_buffer, pos - qca->tx_buffer); if (written > 0) { qca->tx_left = (pos - qca->tx_buffer) - written; qca->tx_head = qca->tx_buffer + written; n_stats->tx_bytes += written; } spin_unlock(&qca->lock); netif_trans_update(dev); out: dev_kfree_skb_any(skb); return NETDEV_TX_OK; } static void qcauart_netdev_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct qcauart *qca = netdev_priv(dev); netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n", jiffies, dev_trans_start(dev)); dev->stats.tx_errors++; dev->stats.tx_dropped++; } static int qcauart_netdev_init(struct net_device *dev) { struct qcauart *qca = netdev_priv(dev); size_t len; /* Finish setting up the device info. 
*/ dev->mtu = QCAFRM_MAX_MTU; dev->type = ARPHRD_ETHER; len = QCAFRM_HEADER_LEN + QCAFRM_MAX_LEN + QCAFRM_FOOTER_LEN; qca->tx_buffer = devm_kmalloc(&qca->serdev->dev, len, GFP_KERNEL); if (!qca->tx_buffer) return -ENOMEM; qca->rx_skb = netdev_alloc_skb_ip_align(qca->net_dev, qca->net_dev->mtu + VLAN_ETH_HLEN); if (!qca->rx_skb) return -ENOBUFS; return 0; } static void qcauart_netdev_uninit(struct net_device *dev) { struct qcauart *qca = netdev_priv(dev); dev_kfree_skb(qca->rx_skb); } static const struct net_device_ops qcauart_netdev_ops = { .ndo_init = qcauart_netdev_init, .ndo_uninit = qcauart_netdev_uninit, .ndo_open = qcauart_netdev_open, .ndo_stop = qcauart_netdev_close, .ndo_start_xmit = qcauart_netdev_xmit, .ndo_set_mac_address = eth_mac_addr, .ndo_tx_timeout = qcauart_netdev_tx_timeout, .ndo_validate_addr = eth_validate_addr, }; static void qcauart_netdev_setup(struct net_device *dev) { dev->netdev_ops = &qcauart_netdev_ops; dev->watchdog_timeo = QCAUART_TX_TIMEOUT; dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->tx_queue_len = 100; /* MTU range: 46 - 1500 */ dev->min_mtu = QCAFRM_MIN_MTU; dev->max_mtu = QCAFRM_MAX_MTU; } static const struct of_device_id qca_uart_of_match[] = { { .compatible = "qca,qca7000", }, {} }; MODULE_DEVICE_TABLE(of, qca_uart_of_match); static int qca_uart_probe(struct serdev_device *serdev) { struct net_device *qcauart_dev = alloc_etherdev(sizeof(struct qcauart)); struct qcauart *qca; u32 speed = 115200; int ret; if (!qcauart_dev) return -ENOMEM; qcauart_netdev_setup(qcauart_dev); SET_NETDEV_DEV(qcauart_dev, &serdev->dev); qca = netdev_priv(qcauart_dev); if (!qca) { pr_err("qca_uart: Fail to retrieve private structure\n"); ret = -ENOMEM; goto free; } qca->net_dev = qcauart_dev; qca->serdev = serdev; qcafrm_fsm_init_uart(&qca->frm_handle); spin_lock_init(&qca->lock); INIT_WORK(&qca->tx_work, qcauart_transmit); of_property_read_u32(serdev->dev.of_node, "current-speed", &speed); ret = of_get_ethdev_address(serdev->dev.of_node, qca->net_dev); if (ret) { eth_hw_addr_random(qca->net_dev); dev_info(&serdev->dev, "Using random MAC address: %pM\n", qca->net_dev->dev_addr); } netif_carrier_on(qca->net_dev); serdev_device_set_drvdata(serdev, qca); serdev_device_set_client_ops(serdev, &qca_serdev_ops); ret = serdev_device_open(serdev); if (ret) { dev_err(&serdev->dev, "Unable to open device %s\n", qcauart_dev->name); goto free; } speed = serdev_device_set_baudrate(serdev, speed); dev_info(&serdev->dev, "Using baudrate: %u\n", speed); serdev_device_set_flow_control(serdev, false); ret = register_netdev(qcauart_dev); if (ret) { dev_err(&serdev->dev, "Unable to register net device %s\n", qcauart_dev->name); serdev_device_close(serdev); cancel_work_sync(&qca->tx_work); goto free; } return 0; free: free_netdev(qcauart_dev); return ret; } static void qca_uart_remove(struct serdev_device *serdev) { struct qcauart *qca = serdev_device_get_drvdata(serdev); unregister_netdev(qca->net_dev); /* Flush any pending characters in the driver. */ serdev_device_close(serdev); cancel_work_sync(&qca->tx_work); free_netdev(qca->net_dev); } static struct serdev_device_driver qca_uart_driver = { .probe = qca_uart_probe, .remove = qca_uart_remove, .driver = { .name = QCAUART_DRV_NAME, .of_match_table = qca_uart_of_match, }, }; module_serdev_device_driver(qca_uart_driver); MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 UART Driver"); MODULE_AUTHOR("Qualcomm Atheros Communications"); MODULE_AUTHOR("Stefan Wahren <[email protected]>"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(QCAUART_DRV_VERSION);
linux-master
drivers/net/ethernet/qualcomm/qca_uart.c
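qca_tty_receive() above pushes every received UART byte through qcafrm_fsm_decode(), which keeps returning gather or error codes until a whole frame has accumulated in the preallocated sk_buff and then returns the payload length. The self-contained sketch below shows the same feed-one-byte, get-length-back idea; the frame layout it parses (four 0xAA start bytes, a 16-bit little-endian length, payload, two 0x55 end bytes) is a simplified stand-in chosen for the example, not the exact QCA7000 framing, and all names are illustrative.

#include <stdint.h>
#include <stdio.h>

#define DEC_GATHER  -1              /* need more bytes */
#define DEC_BAD     -2              /* framing error, resync */
#define MAX_PAYLOAD 1536

struct decoder {
	enum { HUNT, LEN0, LEN1, DATA, END0, END1 } state;
	unsigned int sof_seen;
	uint16_t len, got;
};

/* Feed one byte; returns DEC_GATHER, DEC_BAD, or the payload length once
 * a complete frame has been copied into buf. */
static int dec_feed(struct decoder *d, uint8_t *buf, size_t bufsz, uint8_t byte)
{
	switch (d->state) {
	case HUNT:
		d->sof_seen = (byte == 0xAA) ? d->sof_seen + 1 : 0;
		if (d->sof_seen == 4) {
			d->sof_seen = 0;
			d->state = LEN0;
		}
		return DEC_GATHER;
	case LEN0:
		d->len = byte;
		d->state = LEN1;
		return DEC_GATHER;
	case LEN1:
		d->len |= (uint16_t)byte << 8;
		d->got = 0;
		if (d->len == 0 || d->len > bufsz || d->len > MAX_PAYLOAD) {
			d->state = HUNT;        /* like QCAFRM_INVLEN */
			return DEC_BAD;
		}
		d->state = DATA;
		return DEC_GATHER;
	case DATA:
		buf[d->got++] = byte;
		if (d->got == d->len)
			d->state = END0;
		return DEC_GATHER;
	case END0:
		d->state = (byte == 0x55) ? END1 : HUNT;
		return (byte == 0x55) ? DEC_GATHER : DEC_BAD;
	case END1:
		d->state = HUNT;                /* like QCAFRM_NOTAIL on error */
		return (byte == 0x55) ? d->len : DEC_BAD;
	}
	return DEC_BAD;
}

int main(void)
{
	static const uint8_t wire[] = { 0xAA, 0xAA, 0xAA, 0xAA, 0x05, 0x00,
					'h', 'e', 'l', 'l', 'o', 0x55, 0x55 };
	uint8_t payload[MAX_PAYLOAD];
	struct decoder d = { .state = HUNT };
	int ret = DEC_GATHER;

	for (size_t i = 0; i < sizeof(wire); i++)
		ret = dec_feed(&d, payload, sizeof(payload), wire[i]);

	if (ret > 0)
		printf("decoded %d payload bytes: %.*s\n", ret, ret,
		       (const char *)payload);
	return 0;
}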
/* * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. * Copyright (c) 2014, I2SE GmbH * * Permission to use, copy, modify, and/or distribute this software * for any purpose with or without fee is hereby granted, provided * that the above copyright notice and this permission notice appear * in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This module implements the Qualcomm Atheros SPI protocol for * kernel-based SPI device; it is essentially an Ethernet-to-SPI * serial converter; */ #include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_net.h> #include <linux/sched.h> #include <linux/skbuff.h> #include <linux/spi/spi.h> #include <linux/types.h> #include "qca_7k.h" #include "qca_7k_common.h" #include "qca_debug.h" #include "qca_spi.h" #define MAX_DMA_BURST_LEN 5000 /* Modules parameters */ #define QCASPI_CLK_SPEED_MIN 1000000 #define QCASPI_CLK_SPEED_MAX 16000000 #define QCASPI_CLK_SPEED 8000000 static int qcaspi_clkspeed; module_param(qcaspi_clkspeed, int, 0); MODULE_PARM_DESC(qcaspi_clkspeed, "SPI bus clock speed (Hz). Use 1000000-16000000."); #define QCASPI_BURST_LEN_MIN 1 #define QCASPI_BURST_LEN_MAX MAX_DMA_BURST_LEN static int qcaspi_burst_len = MAX_DMA_BURST_LEN; module_param(qcaspi_burst_len, int, 0); MODULE_PARM_DESC(qcaspi_burst_len, "Number of data bytes per burst. Use 1-5000."); #define QCASPI_PLUGGABLE_MIN 0 #define QCASPI_PLUGGABLE_MAX 1 static int qcaspi_pluggable = QCASPI_PLUGGABLE_MIN; module_param(qcaspi_pluggable, int, 0); MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no)."); #define QCASPI_WRITE_VERIFY_MIN 0 #define QCASPI_WRITE_VERIFY_MAX 3 static int wr_verify = QCASPI_WRITE_VERIFY_MIN; module_param(wr_verify, int, 0); MODULE_PARM_DESC(wr_verify, "SPI register write verify trails. 
Use 0-3."); #define QCASPI_TX_TIMEOUT (1 * HZ) #define QCASPI_QCA7K_REBOOT_TIME_MS 1000 static void start_spi_intr_handling(struct qcaspi *qca, u16 *intr_cause) { *intr_cause = 0; qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0, wr_verify); qcaspi_read_register(qca, SPI_REG_INTR_CAUSE, intr_cause); netdev_dbg(qca->net_dev, "interrupts: 0x%04x\n", *intr_cause); } static void end_spi_intr_handling(struct qcaspi *qca, u16 intr_cause) { u16 intr_enable = (SPI_INT_CPU_ON | SPI_INT_PKT_AVLBL | SPI_INT_RDBUF_ERR | SPI_INT_WRBUF_ERR); qcaspi_write_register(qca, SPI_REG_INTR_CAUSE, intr_cause, 0); qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, intr_enable, wr_verify); netdev_dbg(qca->net_dev, "acking int: 0x%04x\n", intr_cause); } static u32 qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len) { __be16 cmd; struct spi_message msg; struct spi_transfer transfer[2]; int ret; memset(&transfer, 0, sizeof(transfer)); spi_message_init(&msg); cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL); transfer[0].tx_buf = &cmd; transfer[0].len = QCASPI_CMD_LEN; transfer[1].tx_buf = src; transfer[1].len = len; spi_message_add_tail(&transfer[0], &msg); spi_message_add_tail(&transfer[1], &msg); ret = spi_sync(qca->spi_dev, &msg); if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) { qcaspi_spi_error(qca); return 0; } return len; } static u32 qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len) { struct spi_message msg; struct spi_transfer transfer; int ret; memset(&transfer, 0, sizeof(transfer)); spi_message_init(&msg); transfer.tx_buf = src; transfer.len = len; spi_message_add_tail(&transfer, &msg); ret = spi_sync(qca->spi_dev, &msg); if (ret || (msg.actual_length != len)) { qcaspi_spi_error(qca); return 0; } return len; } static u32 qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len) { struct spi_message msg; __be16 cmd; struct spi_transfer transfer[2]; int ret; memset(&transfer, 0, sizeof(transfer)); spi_message_init(&msg); cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL); transfer[0].tx_buf = &cmd; transfer[0].len = QCASPI_CMD_LEN; transfer[1].rx_buf = dst; transfer[1].len = len; spi_message_add_tail(&transfer[0], &msg); spi_message_add_tail(&transfer[1], &msg); ret = spi_sync(qca->spi_dev, &msg); if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) { qcaspi_spi_error(qca); return 0; } return len; } static u32 qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len) { struct spi_message msg; struct spi_transfer transfer; int ret; memset(&transfer, 0, sizeof(transfer)); spi_message_init(&msg); transfer.rx_buf = dst; transfer.len = len; spi_message_add_tail(&transfer, &msg); ret = spi_sync(qca->spi_dev, &msg); if (ret || (msg.actual_length != len)) { qcaspi_spi_error(qca); return 0; } return len; } static int qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd) { __be16 tx_data; struct spi_message msg; struct spi_transfer transfer; int ret; memset(&transfer, 0, sizeof(transfer)); spi_message_init(&msg); tx_data = cpu_to_be16(cmd); transfer.len = sizeof(cmd); transfer.tx_buf = &tx_data; spi_message_add_tail(&transfer, &msg); ret = spi_sync(qca->spi_dev, &msg); if (!ret) ret = msg.status; if (ret) qcaspi_spi_error(qca); return ret; } static int qcaspi_tx_frame(struct qcaspi *qca, struct sk_buff *skb) { u32 count; u32 written; u32 offset; u32 len; len = skb->len; qcaspi_write_register(qca, SPI_REG_BFR_SIZE, len, wr_verify); if (qca->legacy_mode) qcaspi_tx_cmd(qca, QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL); offset = 0; while (len) { count = len; if (count > qca->burst_len) count = qca->burst_len; if 
(qca->legacy_mode) { written = qcaspi_write_legacy(qca, skb->data + offset, count); } else { written = qcaspi_write_burst(qca, skb->data + offset, count); } if (written != count) return -1; offset += count; len -= count; } return 0; } static int qcaspi_transmit(struct qcaspi *qca) { struct net_device_stats *n_stats = &qca->net_dev->stats; u16 available = 0; u32 pkt_len; u16 new_head; u16 packets = 0; if (qca->txr.skb[qca->txr.head] == NULL) return 0; qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA, &available); if (available > QCASPI_HW_BUF_LEN) { /* This could only happen by interferences on the SPI line. * So retry later ... */ qca->stats.buf_avail_err++; return -1; } while (qca->txr.skb[qca->txr.head]) { pkt_len = qca->txr.skb[qca->txr.head]->len + QCASPI_HW_PKT_LEN; if (available < pkt_len) { if (packets == 0) qca->stats.write_buf_miss++; break; } if (qcaspi_tx_frame(qca, qca->txr.skb[qca->txr.head]) == -1) { qca->stats.write_err++; return -1; } packets++; n_stats->tx_packets++; n_stats->tx_bytes += qca->txr.skb[qca->txr.head]->len; available -= pkt_len; /* remove the skb from the queue */ /* XXX After inconsistent lock states netif_tx_lock() * has been replaced by netif_tx_lock_bh() and so on. */ netif_tx_lock_bh(qca->net_dev); dev_kfree_skb(qca->txr.skb[qca->txr.head]); qca->txr.skb[qca->txr.head] = NULL; qca->txr.size -= pkt_len; new_head = qca->txr.head + 1; if (new_head >= qca->txr.count) new_head = 0; qca->txr.head = new_head; if (netif_queue_stopped(qca->net_dev)) netif_wake_queue(qca->net_dev); netif_tx_unlock_bh(qca->net_dev); } return 0; } static int qcaspi_receive(struct qcaspi *qca) { struct net_device *net_dev = qca->net_dev; struct net_device_stats *n_stats = &net_dev->stats; u16 available = 0; u32 bytes_read; u8 *cp; /* Allocate rx SKB if we don't have one available. */ if (!qca->rx_skb) { qca->rx_skb = netdev_alloc_skb_ip_align(net_dev, net_dev->mtu + VLAN_ETH_HLEN); if (!qca->rx_skb) { netdev_dbg(net_dev, "out of RX resources\n"); qca->stats.out_of_mem++; return -1; } } /* Read the packet size. */ qcaspi_read_register(qca, SPI_REG_RDBUF_BYTE_AVA, &available); netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n", available); if (available > QCASPI_HW_BUF_LEN + QCASPI_HW_PKT_LEN) { /* This could only happen by interferences on the SPI line. * So retry later ... 
*/ qca->stats.buf_avail_err++; return -1; } else if (available == 0) { netdev_dbg(net_dev, "qcaspi_receive called without any data being available!\n"); return -1; } qcaspi_write_register(qca, SPI_REG_BFR_SIZE, available, wr_verify); if (qca->legacy_mode) qcaspi_tx_cmd(qca, QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL); while (available) { u32 count = available; if (count > qca->burst_len) count = qca->burst_len; if (qca->legacy_mode) { bytes_read = qcaspi_read_legacy(qca, qca->rx_buffer, count); } else { bytes_read = qcaspi_read_burst(qca, qca->rx_buffer, count); } netdev_dbg(net_dev, "available: %d, byte read: %d\n", available, bytes_read); if (bytes_read) { available -= bytes_read; } else { qca->stats.read_err++; return -1; } cp = qca->rx_buffer; while ((bytes_read--) && (qca->rx_skb)) { s32 retcode; retcode = qcafrm_fsm_decode(&qca->frm_handle, qca->rx_skb->data, skb_tailroom(qca->rx_skb), *cp); cp++; switch (retcode) { case QCAFRM_GATHER: case QCAFRM_NOHEAD: break; case QCAFRM_NOTAIL: netdev_dbg(net_dev, "no RX tail\n"); n_stats->rx_errors++; n_stats->rx_dropped++; break; case QCAFRM_INVLEN: netdev_dbg(net_dev, "invalid RX length\n"); n_stats->rx_errors++; n_stats->rx_dropped++; break; default: qca->rx_skb->dev = qca->net_dev; n_stats->rx_packets++; n_stats->rx_bytes += retcode; skb_put(qca->rx_skb, retcode); qca->rx_skb->protocol = eth_type_trans( qca->rx_skb, qca->rx_skb->dev); skb_checksum_none_assert(qca->rx_skb); netif_rx(qca->rx_skb); qca->rx_skb = netdev_alloc_skb_ip_align(net_dev, net_dev->mtu + VLAN_ETH_HLEN); if (!qca->rx_skb) { netdev_dbg(net_dev, "out of RX resources\n"); n_stats->rx_errors++; qca->stats.out_of_mem++; break; } } } } return 0; } /* Check that tx ring stores only so much bytes * that fit into the internal QCA buffer. */ static int qcaspi_tx_ring_has_space(struct tx_ring *txr) { if (txr->skb[txr->tail]) return 0; return (txr->size + QCAFRM_MAX_LEN < QCASPI_HW_BUF_LEN) ? 1 : 0; } /* Flush the tx ring. This function is only safe to * call from the qcaspi_spi_thread. */ static void qcaspi_flush_tx_ring(struct qcaspi *qca) { int i; /* XXX After inconsistent lock states netif_tx_lock() * has been replaced by netif_tx_lock_bh() and so on. */ netif_tx_lock_bh(qca->net_dev); for (i = 0; i < TX_RING_MAX_LEN; i++) { if (qca->txr.skb[i]) { dev_kfree_skb(qca->txr.skb[i]); qca->txr.skb[i] = NULL; qca->net_dev->stats.tx_dropped++; } } qca->txr.tail = 0; qca->txr.head = 0; qca->txr.size = 0; netif_tx_unlock_bh(qca->net_dev); } static void qcaspi_qca7k_sync(struct qcaspi *qca, int event) { u16 signature = 0; u16 spi_config; u16 wrbuf_space = 0; if (event == QCASPI_EVENT_CPUON) { /* Read signature twice, if not valid * go back to unknown state. */ qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature); qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature); if (signature != QCASPI_GOOD_SIGNATURE) { if (qca->sync == QCASPI_SYNC_READY) qca->stats.bad_signature++; qca->sync = QCASPI_SYNC_UNKNOWN; netdev_dbg(qca->net_dev, "sync: got CPU on, but signature was invalid, restart\n"); return; } else { /* ensure that the WRBUF is empty */ qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA, &wrbuf_space); if (wrbuf_space != QCASPI_HW_BUF_LEN) { netdev_dbg(qca->net_dev, "sync: got CPU on, but wrbuf not empty. reset!\n"); qca->sync = QCASPI_SYNC_UNKNOWN; } else { netdev_dbg(qca->net_dev, "sync: got CPU on, now in sync\n"); qca->sync = QCASPI_SYNC_READY; return; } } } switch (qca->sync) { case QCASPI_SYNC_READY: /* Check signature twice, if not valid go to unknown state. 
*/ qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature); if (signature != QCASPI_GOOD_SIGNATURE) qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature); if (signature != QCASPI_GOOD_SIGNATURE) { qca->sync = QCASPI_SYNC_UNKNOWN; qca->stats.bad_signature++; netdev_dbg(qca->net_dev, "sync: bad signature, restart\n"); /* don't reset right away */ return; } break; case QCASPI_SYNC_UNKNOWN: /* Read signature, if not valid stay in unknown state */ qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature); if (signature != QCASPI_GOOD_SIGNATURE) { netdev_dbg(qca->net_dev, "sync: could not read signature to reset device, retry.\n"); return; } /* TODO: use GPIO to reset QCA7000 in legacy mode*/ netdev_dbg(qca->net_dev, "sync: resetting device.\n"); qcaspi_read_register(qca, SPI_REG_SPI_CONFIG, &spi_config); spi_config |= QCASPI_SLAVE_RESET_BIT; qcaspi_write_register(qca, SPI_REG_SPI_CONFIG, spi_config, 0); qca->sync = QCASPI_SYNC_RESET; qca->stats.trig_reset++; qca->reset_count = 0; break; case QCASPI_SYNC_RESET: qca->reset_count++; netdev_dbg(qca->net_dev, "sync: waiting for CPU on, count %u.\n", qca->reset_count); if (qca->reset_count >= QCASPI_RESET_TIMEOUT) { /* reset did not seem to take place, try again */ qca->sync = QCASPI_SYNC_UNKNOWN; qca->stats.reset_timeout++; netdev_dbg(qca->net_dev, "sync: reset timeout, restarting process.\n"); } break; } } static int qcaspi_spi_thread(void *data) { struct qcaspi *qca = data; u16 intr_cause = 0; netdev_info(qca->net_dev, "SPI thread created\n"); while (!kthread_should_stop()) { set_current_state(TASK_INTERRUPTIBLE); if ((qca->intr_req == qca->intr_svc) && !qca->txr.skb[qca->txr.head]) schedule(); set_current_state(TASK_RUNNING); netdev_dbg(qca->net_dev, "have work to do. int: %d, tx_skb: %p\n", qca->intr_req - qca->intr_svc, qca->txr.skb[qca->txr.head]); qcaspi_qca7k_sync(qca, QCASPI_EVENT_UPDATE); if (qca->sync != QCASPI_SYNC_READY) { netdev_dbg(qca->net_dev, "sync: not ready %u, turn off carrier and flush\n", (unsigned int)qca->sync); netif_stop_queue(qca->net_dev); netif_carrier_off(qca->net_dev); qcaspi_flush_tx_ring(qca); msleep(QCASPI_QCA7K_REBOOT_TIME_MS); } if (qca->intr_svc != qca->intr_req) { qca->intr_svc = qca->intr_req; start_spi_intr_handling(qca, &intr_cause); if (intr_cause & SPI_INT_CPU_ON) { qcaspi_qca7k_sync(qca, QCASPI_EVENT_CPUON); /* not synced. 
*/ if (qca->sync != QCASPI_SYNC_READY) continue; qca->stats.device_reset++; netif_wake_queue(qca->net_dev); netif_carrier_on(qca->net_dev); } if (intr_cause & SPI_INT_RDBUF_ERR) { /* restart sync */ netdev_dbg(qca->net_dev, "===> rdbuf error!\n"); qca->stats.read_buf_err++; qca->sync = QCASPI_SYNC_UNKNOWN; continue; } if (intr_cause & SPI_INT_WRBUF_ERR) { /* restart sync */ netdev_dbg(qca->net_dev, "===> wrbuf error!\n"); qca->stats.write_buf_err++; qca->sync = QCASPI_SYNC_UNKNOWN; continue; } /* can only handle other interrupts * if sync has occurred */ if (qca->sync == QCASPI_SYNC_READY) { if (intr_cause & SPI_INT_PKT_AVLBL) qcaspi_receive(qca); } end_spi_intr_handling(qca, intr_cause); } if (qca->sync == QCASPI_SYNC_READY) qcaspi_transmit(qca); } set_current_state(TASK_RUNNING); netdev_info(qca->net_dev, "SPI thread exit\n"); return 0; } static irqreturn_t qcaspi_intr_handler(int irq, void *data) { struct qcaspi *qca = data; qca->intr_req++; if (qca->spi_thread) wake_up_process(qca->spi_thread); return IRQ_HANDLED; } static int qcaspi_netdev_open(struct net_device *dev) { struct qcaspi *qca = netdev_priv(dev); int ret = 0; if (!qca) return -EINVAL; qca->intr_req = 1; qca->intr_svc = 0; qca->sync = QCASPI_SYNC_UNKNOWN; qcafrm_fsm_init_spi(&qca->frm_handle); qca->spi_thread = kthread_run((void *)qcaspi_spi_thread, qca, "%s", dev->name); if (IS_ERR(qca->spi_thread)) { netdev_err(dev, "%s: unable to start kernel thread.\n", QCASPI_DRV_NAME); return PTR_ERR(qca->spi_thread); } ret = request_irq(qca->spi_dev->irq, qcaspi_intr_handler, 0, dev->name, qca); if (ret) { netdev_err(dev, "%s: unable to get IRQ %d (irqval=%d).\n", QCASPI_DRV_NAME, qca->spi_dev->irq, ret); kthread_stop(qca->spi_thread); return ret; } /* SPI thread takes care of TX queue */ return 0; } static int qcaspi_netdev_close(struct net_device *dev) { struct qcaspi *qca = netdev_priv(dev); netif_stop_queue(dev); qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0, wr_verify); free_irq(qca->spi_dev->irq, qca); kthread_stop(qca->spi_thread); qca->spi_thread = NULL; qcaspi_flush_tx_ring(qca); return 0; } static netdev_tx_t qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev) { u32 frame_len; u8 *ptmp; struct qcaspi *qca = netdev_priv(dev); u16 new_tail; struct sk_buff *tskb; u8 pad_len = 0; if (skb->len < QCAFRM_MIN_LEN) pad_len = QCAFRM_MIN_LEN - skb->len; if (qca->txr.skb[qca->txr.tail]) { netdev_warn(qca->net_dev, "queue was unexpectedly full!\n"); netif_stop_queue(qca->net_dev); qca->stats.ring_full++; return NETDEV_TX_BUSY; } if ((skb_headroom(skb) < QCAFRM_HEADER_LEN) || (skb_tailroom(skb) < QCAFRM_FOOTER_LEN + pad_len)) { tskb = skb_copy_expand(skb, QCAFRM_HEADER_LEN, QCAFRM_FOOTER_LEN + pad_len, GFP_ATOMIC); if (!tskb) { qca->stats.out_of_mem++; return NETDEV_TX_BUSY; } dev_kfree_skb(skb); skb = tskb; } frame_len = skb->len + pad_len; ptmp = skb_push(skb, QCAFRM_HEADER_LEN); qcafrm_create_header(ptmp, frame_len); if (pad_len) { ptmp = skb_put_zero(skb, pad_len); } ptmp = skb_put(skb, QCAFRM_FOOTER_LEN); qcafrm_create_footer(ptmp); netdev_dbg(qca->net_dev, "Tx-ing packet: Size: 0x%08x\n", skb->len); qca->txr.size += skb->len + QCASPI_HW_PKT_LEN; new_tail = qca->txr.tail + 1; if (new_tail >= qca->txr.count) new_tail = 0; qca->txr.skb[qca->txr.tail] = skb; qca->txr.tail = new_tail; if (!qcaspi_tx_ring_has_space(&qca->txr)) { netif_stop_queue(qca->net_dev); qca->stats.ring_full++; } netif_trans_update(dev); if (qca->spi_thread) wake_up_process(qca->spi_thread); return NETDEV_TX_OK; } static void 
qcaspi_netdev_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct qcaspi *qca = netdev_priv(dev); netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n", jiffies, jiffies - dev_trans_start(dev)); qca->net_dev->stats.tx_errors++; /* Trigger tx queue flush and QCA7000 reset */ qca->sync = QCASPI_SYNC_UNKNOWN; if (qca->spi_thread) wake_up_process(qca->spi_thread); } static int qcaspi_netdev_init(struct net_device *dev) { struct qcaspi *qca = netdev_priv(dev); dev->mtu = QCAFRM_MAX_MTU; dev->type = ARPHRD_ETHER; qca->clkspeed = qcaspi_clkspeed; qca->burst_len = qcaspi_burst_len; qca->spi_thread = NULL; qca->buffer_size = (dev->mtu + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN + QCAFRM_FOOTER_LEN + 4) * 4; memset(&qca->stats, 0, sizeof(struct qcaspi_stats)); qca->rx_buffer = kmalloc(qca->buffer_size, GFP_KERNEL); if (!qca->rx_buffer) return -ENOBUFS; qca->rx_skb = netdev_alloc_skb_ip_align(dev, qca->net_dev->mtu + VLAN_ETH_HLEN); if (!qca->rx_skb) { kfree(qca->rx_buffer); netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n"); return -ENOBUFS; } return 0; } static void qcaspi_netdev_uninit(struct net_device *dev) { struct qcaspi *qca = netdev_priv(dev); kfree(qca->rx_buffer); qca->buffer_size = 0; dev_kfree_skb(qca->rx_skb); } static const struct net_device_ops qcaspi_netdev_ops = { .ndo_init = qcaspi_netdev_init, .ndo_uninit = qcaspi_netdev_uninit, .ndo_open = qcaspi_netdev_open, .ndo_stop = qcaspi_netdev_close, .ndo_start_xmit = qcaspi_netdev_xmit, .ndo_set_mac_address = eth_mac_addr, .ndo_tx_timeout = qcaspi_netdev_tx_timeout, .ndo_validate_addr = eth_validate_addr, }; static void qcaspi_netdev_setup(struct net_device *dev) { struct qcaspi *qca = NULL; dev->netdev_ops = &qcaspi_netdev_ops; qcaspi_set_ethtool_ops(dev); dev->watchdog_timeo = QCASPI_TX_TIMEOUT; dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->tx_queue_len = 100; /* MTU range: 46 - 1500 */ dev->min_mtu = QCAFRM_MIN_MTU; dev->max_mtu = QCAFRM_MAX_MTU; qca = netdev_priv(dev); memset(qca, 0, sizeof(struct qcaspi)); memset(&qca->txr, 0, sizeof(qca->txr)); qca->txr.count = TX_RING_MAX_LEN; } static const struct of_device_id qca_spi_of_match[] = { { .compatible = "qca,qca7000" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, qca_spi_of_match); static int qca_spi_probe(struct spi_device *spi) { struct qcaspi *qca = NULL; struct net_device *qcaspi_devs = NULL; u8 legacy_mode = 0; u16 signature; int ret; if (!spi->dev.of_node) { dev_err(&spi->dev, "Missing device tree\n"); return -EINVAL; } legacy_mode = of_property_read_bool(spi->dev.of_node, "qca,legacy-mode"); if (qcaspi_clkspeed == 0) { if (spi->max_speed_hz) qcaspi_clkspeed = spi->max_speed_hz; else qcaspi_clkspeed = QCASPI_CLK_SPEED; } if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) || (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) { dev_err(&spi->dev, "Invalid clkspeed: %d\n", qcaspi_clkspeed); return -EINVAL; } if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) || (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) { dev_err(&spi->dev, "Invalid burst len: %d\n", qcaspi_burst_len); return -EINVAL; } if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) || (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) { dev_err(&spi->dev, "Invalid pluggable: %d\n", qcaspi_pluggable); return -EINVAL; } if (wr_verify < QCASPI_WRITE_VERIFY_MIN || wr_verify > QCASPI_WRITE_VERIFY_MAX) { dev_err(&spi->dev, "Invalid write verify: %d\n", wr_verify); return -EINVAL; } dev_info(&spi->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n", QCASPI_DRV_VERSION, qcaspi_clkspeed, qcaspi_burst_len, qcaspi_pluggable); 
spi->mode = SPI_MODE_3; spi->max_speed_hz = qcaspi_clkspeed; if (spi_setup(spi) < 0) { dev_err(&spi->dev, "Unable to setup SPI device\n"); return -EFAULT; } qcaspi_devs = alloc_etherdev(sizeof(struct qcaspi)); if (!qcaspi_devs) return -ENOMEM; qcaspi_netdev_setup(qcaspi_devs); SET_NETDEV_DEV(qcaspi_devs, &spi->dev); qca = netdev_priv(qcaspi_devs); if (!qca) { free_netdev(qcaspi_devs); dev_err(&spi->dev, "Fail to retrieve private structure\n"); return -ENOMEM; } qca->net_dev = qcaspi_devs; qca->spi_dev = spi; qca->legacy_mode = legacy_mode; spi_set_drvdata(spi, qcaspi_devs); ret = of_get_ethdev_address(spi->dev.of_node, qca->net_dev); if (ret) { eth_hw_addr_random(qca->net_dev); dev_info(&spi->dev, "Using random MAC address: %pM\n", qca->net_dev->dev_addr); } netif_carrier_off(qca->net_dev); if (!qcaspi_pluggable) { qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature); qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature); if (signature != QCASPI_GOOD_SIGNATURE) { dev_err(&spi->dev, "Invalid signature (0x%04X)\n", signature); free_netdev(qcaspi_devs); return -EFAULT; } } if (register_netdev(qcaspi_devs)) { dev_err(&spi->dev, "Unable to register net device %s\n", qcaspi_devs->name); free_netdev(qcaspi_devs); return -EFAULT; } qcaspi_init_device_debugfs(qca); return 0; } static void qca_spi_remove(struct spi_device *spi) { struct net_device *qcaspi_devs = spi_get_drvdata(spi); struct qcaspi *qca = netdev_priv(qcaspi_devs); qcaspi_remove_device_debugfs(qca); unregister_netdev(qcaspi_devs); free_netdev(qcaspi_devs); } static const struct spi_device_id qca_spi_id[] = { { "qca7000", 0 }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(spi, qca_spi_id); static struct spi_driver qca_spi_driver = { .driver = { .name = QCASPI_DRV_NAME, .of_match_table = qca_spi_of_match, }, .id_table = qca_spi_id, .probe = qca_spi_probe, .remove = qca_spi_remove, }; module_spi_driver(qca_spi_driver); MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 SPI Driver"); MODULE_AUTHOR("Qualcomm Atheros Communications"); MODULE_AUTHOR("Stefan Wahren <[email protected]>"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(QCASPI_DRV_VERSION);
linux-master
drivers/net/ethernet/qualcomm/qca_spi.c
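Note: the transmit path in qcaspi_netdev_xmit() above only wraps the Ethernet frame in the Atheros framing before queuing it on the TX ring. The sketch below is illustrative only, not part of the driver: the helper name is hypothetical, it reuses qcafrm_create_header()/qcafrm_create_footer() from qca_7k_common.c, and it assumes QCAFRM_MIN_LEN is the usual 60-byte minimum Ethernet frame length.

/* Hypothetical sketch of the TX wire layout built in qcaspi_netdev_xmit():
 * an 8-byte header (4 x 0xAA marker, 16-bit little-endian frame length,
 * 2 reserved zero bytes), the Ethernet frame zero-padded up to the
 * minimum length, then the 2-byte 0x55 0x55 footer.
 */
#include <linux/string.h>
#include "qca_7k_common.h"

static u32 example_build_tx_frame(u8 *out, const u8 *eth, u16 eth_len)
{
	u16 pad_len = eth_len < QCAFRM_MIN_LEN ? QCAFRM_MIN_LEN - eth_len : 0;
	u16 frame_len = eth_len + pad_len;
	u32 off = 0;

	off += qcafrm_create_header(out, frame_len);	/* 8 bytes */
	memcpy(out + off, eth, eth_len);		/* Ethernet frame */
	memset(out + off + eth_len, 0, pad_len);	/* zero padding */
	off += frame_len;
	off += qcafrm_create_footer(out + off);		/* 2 bytes: 0x55 0x55 */

	return off;	/* total framed length handed to the SPI layer */
}

The additional per-packet hardware length that the SPI transmit path prepends is the reason the ring accounting in qcaspi_netdev_xmit() adds QCASPI_HW_PKT_LEN on top of skb->len.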
/* * * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. * Copyright (c) 2014, I2SE GmbH * * Permission to use, copy, modify, and/or distribute this software * for any purpose with or without fee is hereby granted, provided * that the above copyright notice and this permission notice appear * in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * */ /* This module implements the Qualcomm Atheros SPI protocol for * kernel-based SPI device. */ #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/spi/spi.h> #include "qca_7k.h" void qcaspi_spi_error(struct qcaspi *qca) { if (qca->sync != QCASPI_SYNC_READY) return; netdev_err(qca->net_dev, "spi error\n"); qca->sync = QCASPI_SYNC_UNKNOWN; qca->stats.spi_err++; } int qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result) { __be16 rx_data; __be16 tx_data; struct spi_transfer transfer[2]; struct spi_message msg; int ret; memset(transfer, 0, sizeof(transfer)); spi_message_init(&msg); tx_data = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_INTERNAL | reg); *result = 0; transfer[0].tx_buf = &tx_data; transfer[0].len = QCASPI_CMD_LEN; transfer[1].rx_buf = &rx_data; transfer[1].len = QCASPI_CMD_LEN; spi_message_add_tail(&transfer[0], &msg); if (qca->legacy_mode) { spi_sync(qca->spi_dev, &msg); spi_message_init(&msg); } spi_message_add_tail(&transfer[1], &msg); ret = spi_sync(qca->spi_dev, &msg); if (!ret) ret = msg.status; if (ret) qcaspi_spi_error(qca); else *result = be16_to_cpu(rx_data); return ret; } static int __qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value) { __be16 tx_data[2]; struct spi_transfer transfer[2]; struct spi_message msg; int ret; memset(&transfer, 0, sizeof(transfer)); spi_message_init(&msg); tx_data[0] = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_INTERNAL | reg); tx_data[1] = cpu_to_be16(value); transfer[0].tx_buf = &tx_data[0]; transfer[0].len = QCASPI_CMD_LEN; transfer[1].tx_buf = &tx_data[1]; transfer[1].len = QCASPI_CMD_LEN; spi_message_add_tail(&transfer[0], &msg); if (qca->legacy_mode) { spi_sync(qca->spi_dev, &msg); spi_message_init(&msg); } spi_message_add_tail(&transfer[1], &msg); ret = spi_sync(qca->spi_dev, &msg); if (!ret) ret = msg.status; if (ret) qcaspi_spi_error(qca); return ret; } int qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value, int retry) { int ret, i = 0; u16 confirmed; do { ret = __qcaspi_write_register(qca, reg, value); if (ret) return ret; if (!retry) return 0; ret = qcaspi_read_register(qca, reg, &confirmed); if (ret) return ret; ret = confirmed != value; if (!ret) return 0; i++; qca->stats.write_verify_failed++; } while (i <= retry); return ret; }
linux-master
drivers/net/ethernet/qualcomm/qca_7k.c
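Note: qcaspi_write_register() above implements an optional write-then-read-back verification controlled by its retry argument. The fragment below is a hypothetical caller sketch, not driver code: the function name and the exact interrupt mask are illustrative, and it assumes it sits in qca_spi.c where the wr_verify module parameter and the SPI_INT_*/SPI_REG_* definitions are visible.

/* Hypothetical usage sketch: program the interrupt-enable register and
 * let qcaspi_write_register() read the value back up to wr_verify times.
 * Register and interrupt bit names are the ones used elsewhere in the
 * driver; the particular combination written here is illustrative.
 */
static int example_enable_intr(struct qcaspi *qca)
{
	u16 intr = SPI_INT_CPU_ON | SPI_INT_PKT_AVLBL |
		   SPI_INT_RDBUF_ERR | SPI_INT_WRBUF_ERR;

	/* Returns 0 on success, a negative errno on SPI failure, or a
	 * positive value if the read-back still mismatched after all
	 * retries (write_verify_failed is bumped for every mismatch).
	 */
	return qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, intr,
				     wr_verify);
}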
/* * Copyright (c) 2011, 2012, Atheros Communications Inc. * Copyright (c) 2014, I2SE GmbH * * Permission to use, copy, modify, and/or distribute this software * for any purpose with or without fee is hereby granted, provided * that the above copyright notice and this permission notice appear * in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* Atheros ethernet framing. Every Ethernet frame is surrounded * by an atheros frame while transmitted over a serial channel; */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include "qca_7k_common.h" u16 qcafrm_create_header(u8 *buf, u16 length) { __le16 len; if (!buf) return 0; len = cpu_to_le16(length); buf[0] = 0xAA; buf[1] = 0xAA; buf[2] = 0xAA; buf[3] = 0xAA; buf[4] = len & 0xff; buf[5] = (len >> 8) & 0xff; buf[6] = 0; buf[7] = 0; return QCAFRM_HEADER_LEN; } EXPORT_SYMBOL_GPL(qcafrm_create_header); u16 qcafrm_create_footer(u8 *buf) { if (!buf) return 0; buf[0] = 0x55; buf[1] = 0x55; return QCAFRM_FOOTER_LEN; } EXPORT_SYMBOL_GPL(qcafrm_create_footer); /* Gather received bytes and try to extract a full ethernet frame by * following a simple state machine. * * Return: QCAFRM_GATHER No ethernet frame fully received yet. * QCAFRM_NOHEAD Header expected but not found. * QCAFRM_INVLEN Atheros frame length is invalid * QCAFRM_NOTAIL Footer expected but not found. * > 0 Number of byte in the fully received * Ethernet frame */ s32 qcafrm_fsm_decode(struct qcafrm_handle *handle, u8 *buf, u16 buf_len, u8 recv_byte) { s32 ret = QCAFRM_GATHER; u16 len; switch (handle->state) { case QCAFRM_HW_LEN0: case QCAFRM_HW_LEN1: /* by default, just go to next state */ handle->state--; if (recv_byte != 0x00) { /* first two bytes of length must be 0 */ handle->state = handle->init; } break; case QCAFRM_HW_LEN2: case QCAFRM_HW_LEN3: handle->state--; break; /* 4 bytes header pattern */ case QCAFRM_WAIT_AA1: case QCAFRM_WAIT_AA2: case QCAFRM_WAIT_AA3: case QCAFRM_WAIT_AA4: if (recv_byte != 0xAA) { ret = QCAFRM_NOHEAD; handle->state = handle->init; } else { handle->state--; } break; /* 2 bytes length. */ /* Borrow offset field to hold length for now. */ case QCAFRM_WAIT_LEN_BYTE0: handle->offset = recv_byte; handle->state = QCAFRM_WAIT_LEN_BYTE1; break; case QCAFRM_WAIT_LEN_BYTE1: handle->offset = handle->offset | (recv_byte << 8); handle->state = QCAFRM_WAIT_RSVD_BYTE1; break; case QCAFRM_WAIT_RSVD_BYTE1: handle->state = QCAFRM_WAIT_RSVD_BYTE2; break; case QCAFRM_WAIT_RSVD_BYTE2: len = handle->offset; if (len > buf_len || len < QCAFRM_MIN_LEN) { ret = QCAFRM_INVLEN; handle->state = handle->init; } else { handle->state = (enum qcafrm_state)(len + 1); /* Remaining number of bytes. */ handle->offset = 0; } break; default: /* Receiving Ethernet frame itself. 
*/ buf[handle->offset] = recv_byte; handle->offset++; handle->state--; break; case QCAFRM_WAIT_551: if (recv_byte != 0x55) { ret = QCAFRM_NOTAIL; handle->state = handle->init; } else { handle->state = QCAFRM_WAIT_552; } break; case QCAFRM_WAIT_552: if (recv_byte != 0x55) { ret = QCAFRM_NOTAIL; handle->state = handle->init; } else { ret = handle->offset; /* Frame is fully received. */ handle->state = handle->init; } break; } return ret; } EXPORT_SYMBOL_GPL(qcafrm_fsm_decode); MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 common"); MODULE_AUTHOR("Qualcomm Atheros Communications"); MODULE_AUTHOR("Stefan Wahren <[email protected]>"); MODULE_LICENSE("Dual BSD/GPL");
linux-master
drivers/net/ethernet/qualcomm/qca_7k_common.c
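Note: qcafrm_fsm_decode() above is byte-oriented; the caller owns the frame buffer and keeps feeding received bytes until the decoder reports a positive length. The loop below is a minimal consumption sketch (hypothetical helper, simplified error handling) mirroring how the SPI driver drains a burst-read buffer through the state machine, assuming the surrounding file's includes.

/* Hypothetical RX sketch: run every byte of a raw SPI read buffer
 * through the framing state machine and hand completed Ethernet
 * frames to a delivery callback. Per the documented return values,
 * only results greater than zero carry a frame length; the other
 * codes (GATHER, NOHEAD, INVLEN, NOTAIL) mean "keep feeding bytes",
 * since the decoder resets itself and resyncs on the next 0xAA marker.
 */
static void example_decode_burst(struct qcafrm_handle *frm,
				 const u8 *raw, u32 raw_len,
				 u8 *frame_buf, u16 frame_buf_len,
				 void (*deliver)(const u8 *frame, u16 len))
{
	u32 i;

	for (i = 0; i < raw_len; i++) {
		s32 ret = qcafrm_fsm_decode(frm, frame_buf,
					    frame_buf_len, raw[i]);

		if (ret <= 0)
			continue;	/* not a complete frame yet */

		deliver(frame_buf, (u16)ret);	/* ret = frame length */
	}
}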
/* * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. * Copyright (c) 2014, I2SE GmbH * * Permission to use, copy, modify, and/or distribute this software * for any purpose with or without fee is hereby granted, provided * that the above copyright notice and this permission notice appear * in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This file contains debugging routines for use in the QCA7K driver. */ #include <linux/debugfs.h> #include <linux/ethtool.h> #include <linux/seq_file.h> #include <linux/types.h> #include "qca_7k.h" #include "qca_debug.h" #define QCASPI_MAX_REGS 0x20 static const u16 qcaspi_spi_regs[] = { SPI_REG_BFR_SIZE, SPI_REG_WRBUF_SPC_AVA, SPI_REG_RDBUF_BYTE_AVA, SPI_REG_SPI_CONFIG, SPI_REG_SPI_STATUS, SPI_REG_INTR_CAUSE, SPI_REG_INTR_ENABLE, SPI_REG_RDBUF_WATERMARK, SPI_REG_WRBUF_WATERMARK, SPI_REG_SIGNATURE, SPI_REG_ACTION_CTRL }; /* The order of these strings must match the order of the fields in * struct qcaspi_stats * See qca_spi.h */ static const char qcaspi_gstrings_stats[][ETH_GSTRING_LEN] = { "Triggered resets", "Device resets", "Reset timeouts", "Read errors", "Write errors", "Read buffer errors", "Write buffer errors", "Out of memory", "Write buffer misses", "Transmit ring full", "SPI errors", "Write verify errors", "Buffer available errors", "Bad signature", }; #ifdef CONFIG_DEBUG_FS static int qcaspi_info_show(struct seq_file *s, void *what) { struct qcaspi *qca = s->private; seq_printf(s, "RX buffer size : %lu\n", (unsigned long)qca->buffer_size); seq_puts(s, "TX ring state : "); if (qca->txr.skb[qca->txr.head] == NULL) seq_puts(s, "empty"); else if (qca->txr.skb[qca->txr.tail]) seq_puts(s, "full"); else seq_puts(s, "in use"); seq_puts(s, "\n"); seq_printf(s, "TX ring size : %u\n", qca->txr.size); seq_printf(s, "Sync state : %u (", (unsigned int)qca->sync); switch (qca->sync) { case QCASPI_SYNC_UNKNOWN: seq_puts(s, "QCASPI_SYNC_UNKNOWN"); break; case QCASPI_SYNC_RESET: seq_puts(s, "QCASPI_SYNC_RESET"); break; case QCASPI_SYNC_READY: seq_puts(s, "QCASPI_SYNC_READY"); break; default: seq_puts(s, "INVALID"); break; } seq_puts(s, ")\n"); seq_printf(s, "IRQ : %d\n", qca->spi_dev->irq); seq_printf(s, "INTR REQ : %u\n", qca->intr_req); seq_printf(s, "INTR SVC : %u\n", qca->intr_svc); seq_printf(s, "SPI max speed : %lu\n", (unsigned long)qca->spi_dev->max_speed_hz); seq_printf(s, "SPI mode : %x\n", qca->spi_dev->mode); seq_printf(s, "SPI chip select : %u\n", (unsigned int)spi_get_chipselect(qca->spi_dev, 0)); seq_printf(s, "SPI legacy mode : %u\n", (unsigned int)qca->legacy_mode); seq_printf(s, "SPI burst length : %u\n", (unsigned int)qca->burst_len); return 0; } DEFINE_SHOW_ATTRIBUTE(qcaspi_info); void qcaspi_init_device_debugfs(struct qcaspi *qca) { qca->device_root = debugfs_create_dir(dev_name(&qca->net_dev->dev), NULL); debugfs_create_file("info", S_IFREG | 0444, qca->device_root, qca, &qcaspi_info_fops); } void qcaspi_remove_device_debugfs(struct qcaspi *qca) { debugfs_remove_recursive(qca->device_root); } #else /* CONFIG_DEBUG_FS */ void qcaspi_init_device_debugfs(struct 
qcaspi *qca) { } void qcaspi_remove_device_debugfs(struct qcaspi *qca) { } #endif static void qcaspi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *p) { struct qcaspi *qca = netdev_priv(dev); strscpy(p->driver, QCASPI_DRV_NAME, sizeof(p->driver)); strscpy(p->version, QCASPI_DRV_VERSION, sizeof(p->version)); strscpy(p->fw_version, "QCA7000", sizeof(p->fw_version)); strscpy(p->bus_info, dev_name(&qca->spi_dev->dev), sizeof(p->bus_info)); } static int qcaspi_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { ethtool_link_ksettings_zero_link_mode(cmd, supported); ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half); cmd->base.speed = SPEED_10; cmd->base.duplex = DUPLEX_HALF; cmd->base.port = PORT_OTHER; cmd->base.autoneg = AUTONEG_DISABLE; return 0; } static void qcaspi_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *data) { struct qcaspi *qca = netdev_priv(dev); struct qcaspi_stats *st = &qca->stats; memcpy(data, st, ARRAY_SIZE(qcaspi_gstrings_stats) * sizeof(u64)); } static void qcaspi_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { switch (stringset) { case ETH_SS_STATS: memcpy(buf, &qcaspi_gstrings_stats, sizeof(qcaspi_gstrings_stats)); break; default: WARN_ON(1); break; } } static int qcaspi_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(qcaspi_gstrings_stats); default: return -EINVAL; } } static int qcaspi_get_regs_len(struct net_device *dev) { return sizeof(u32) * QCASPI_MAX_REGS; } static void qcaspi_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { struct qcaspi *qca = netdev_priv(dev); u32 *regs_buff = p; unsigned int i; regs->version = 1; memset(regs_buff, 0, sizeof(u32) * QCASPI_MAX_REGS); for (i = 0; i < ARRAY_SIZE(qcaspi_spi_regs); i++) { u16 offset, value; qcaspi_read_register(qca, qcaspi_spi_regs[i], &value); offset = qcaspi_spi_regs[i] >> 8; regs_buff[offset] = value; } } static void qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct qcaspi *qca = netdev_priv(dev); ring->rx_max_pending = 4; ring->tx_max_pending = TX_RING_MAX_LEN; ring->rx_pending = 4; ring->tx_pending = qca->txr.count; } static int qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { const struct net_device_ops *ops = dev->netdev_ops; struct qcaspi *qca = netdev_priv(dev); if ((ring->rx_pending) || (ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; if (netif_running(dev)) ops->ndo_stop(dev); qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN); qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN); if (netif_running(dev)) ops->ndo_open(dev); return 0; } static const struct ethtool_ops qcaspi_ethtool_ops = { .get_drvinfo = qcaspi_get_drvinfo, .get_link = ethtool_op_get_link, .get_ethtool_stats = qcaspi_get_ethtool_stats, .get_strings = qcaspi_get_strings, .get_sset_count = qcaspi_get_sset_count, .get_regs_len = qcaspi_get_regs_len, .get_regs = qcaspi_get_regs, .get_ringparam = qcaspi_get_ringparam, .set_ringparam = qcaspi_set_ringparam, .get_link_ksettings = qcaspi_get_link_ksettings, }; void qcaspi_set_ethtool_ops(struct net_device *dev) { dev->ethtool_ops = &qcaspi_ethtool_ops; }
linux-master
drivers/net/ethernet/qualcomm/qca_debug.c
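Note: qcaspi_get_ethtool_stats() above copies struct qcaspi_stats straight into the u64 array that ethtool exposes, so the string table and the stats structure must stay in lockstep (the comment above qcaspi_gstrings_stats says as much). A compile-time guard along these lines would make that dependency explicit; it is not present in the driver, and it assumes every counter in struct qcaspi_stats is a u64 with no padding.

/* Hypothetical compile-time check for qca_debug.c: the plain memcpy()
 * in qcaspi_get_ethtool_stats() is only correct if struct qcaspi_stats
 * is exactly one u64 per ethtool string, in the same order.
 */
static inline void example_check_stats_layout(void)
{
	BUILD_BUG_ON(sizeof(struct qcaspi_stats) !=
		     ARRAY_SIZE(qcaspi_gstrings_stats) * sizeof(u64));
}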